# ---- Example 1 ----
    def compute(self):  # called after each epoch
        """Score the accumulated query features against the gallery.

        Concatenates the per-batch features collected during evaluation,
        optionally L2-normalizes them, splits query/gallery at
        ``self.num_query``, builds a distance matrix (re-ranked or cosine),
        and evaluates CMC and mAP.
        """
        all_feats = torch.cat(self.feats, dim=0)
        if self.feat_norm:
            print("The test feature is normalized")
            # L2-normalize each feature vector along the channel dimension
            all_feats = torch.nn.functional.normalize(all_feats, p=2, dim=1)

        split = self.num_query
        # query portion
        qf = all_feats[:split]
        q_pids = np.asarray(self.pids[:split])
        q_camids = np.asarray(self.camids[:split])
        # gallery portion
        gf = all_feats[split:]
        g_pids = np.asarray(self.pids[split:])
        g_camids = np.asarray(self.camids[split:])

        if not self.reranking:
            print('=> Computing DistMat with cosine similarity')
            distmat = cosine_similarity(qf, gf)
        else:
            print('=> Enter reranking')
            distmat = re_ranking(qf, gf, k1=50, k2=15, lambda_value=0.3)

        cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
        return cmc, mAP, distmat, self.pids, self.camids, qf, gf
# ---- Example 2 ----
    def compute(self):
        """Split accumulated features into query/gallery, re-rank, and
        evaluate CMC/mAP.

        NOTE(review): ``k11``, ``k22`` and ``vv`` are not defined in this
        method — presumably module-level globals set by a tuning script;
        confirm they exist before calling, otherwise this raises NameError.
        """
        feats = torch.cat(self.feats, dim=0)
        if self.feat_norm == 'yes':
            print("The test feature is normalized")
            # L2-normalize along the channel dimension
            feats = torch.nn.functional.normalize(feats, dim=1, p=2)

        # query
        qf = feats[:self.num_query]
        q_pids = np.asarray(self.pids[:self.num_query])
        q_camids = np.asarray(self.camids[:self.num_query])
        # gallery
        gf = feats[self.num_query:]
        g_pids = np.asarray(self.pids[self.num_query:])
        g_camids = np.asarray(self.camids[self.num_query:])

        # m, n = qf.shape[0], gf.shape[0]
        # distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        #           torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # distmat.addmm_(1, -2, qf, gf.t())
        # distmat = distmat.cpu().numpy()
        print("Enter reranking")
        distmat = re_ranking(qf, gf, k1=k11, k2=k22, lambda_value=vv)
        cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)

        return cmc, mAP
# ---- Example 3 ----
    def compute(self,
                reranking_parameter=(20, 6, 0.3)):  # called after each epoch
        """Build the query-gallery distance matrix and return it with paths.

        Args:
            reranking_parameter: (k1, k2, lambda_value) for ``re_ranking``.
                The default is a tuple rather than a list so the shared
                default object cannot be mutated across calls (callers that
                pass a list still work — only indexing is used).

        Returns:
            (distmat, query image paths, gallery image paths).
        """
        feats = torch.cat(self.feats, dim=0)
        if self.feat_norm:
            print("The test feature is normalized")
            feats = torch.nn.functional.normalize(feats, dim=1,
                                                  p=2)  # along channel
        # query
        qf = feats[:self.num_query]
        q_path = self.img_name_path[:self.num_query]
        # gallery
        gf = feats[self.num_query:]
        g_path = self.img_name_path[self.num_query:]
        if self.reranking:
            print('=> Enter reranking')
            print('k1={}, k2={}, lambda_value={}'.format(
                reranking_parameter[0], reranking_parameter[1],
                reranking_parameter[2]))
            distmat = re_ranking(qf,
                                 gf,
                                 k1=reranking_parameter[0],
                                 k2=reranking_parameter[1],
                                 lambda_value=reranking_parameter[2])

        else:
            print('=> Computing DistMat with cosine similarity')
            distmat = cosine_similarity(qf, gf)

        return distmat, q_path, g_path
# ---- Example 4 ----
def do_inference(cfg, model, val_loader, num_query, query_name, gallery_name):
    """Extract flip-averaged features, rank gallery images per query, and
    write each query's top-10 gallery files to ``submission.csv`` under
    ``cfg.OUTPUT_DIR``.

    Returns nothing; the result is persisted to disk.
    """
    model.eval()
    model.cuda()
    feat_chunks = []
    # inference only: disable autograd to cut memory and speed up the loop
    with torch.no_grad():
        for (input_img, pid, cid) in val_loader:
            input_img = input_img.cuda()
            input_img_mirror = input_img.flip(dims=[3])  # horizontal flip
            outputs = model(input_img)
            outputs_mirror = model(input_img_mirror)
            # flip augmentation: sum of original and mirrored embeddings
            f = outputs + outputs_mirror
            feat_chunks.append(f)
    # concatenate once instead of re-copying the growing tensor every batch
    if feat_chunks:
        features = torch.cat(feat_chunks, 0)
    else:
        features = torch.FloatTensor().cuda()

    if cfg.TEST.RE_RANKING:
        feats = torch.nn.functional.normalize(features, dim=1, p=2)
        qf = feats[:num_query]
        gf = feats[num_query:]
        ranking_parameter = cfg.TEST.RE_RANKING_PARAMETER
        k1 = ranking_parameter[0]
        k2 = ranking_parameter[1]
        lambda_value = ranking_parameter[2]
        distmat = re_ranking(qf, gf, k1=k1, k2=k2, lambda_value=lambda_value)
    else:
        qf = features[:num_query]
        gf = features[num_query:]
        distmat = cosine_dist(qf, gf)
        distmat = distmat.cpu().numpy()

    num_q, num_g = distmat.shape
    indices = np.argsort(distmat, axis=1)  # ascending distance per query row
    max_10_indices = indices[:, :10]
    res_dict = dict()
    for q_idx in range(num_q):
        filename = query_name[q_idx].split("/")[-1]
        max_10_files = [
            gallery_name[i].split("/")[-1] for i in max_10_indices[q_idx]
        ]
        res_dict[filename] = max_10_files
    # one line per query: "<query>,{g1,...,g10}"
    with open('%s/submission.csv' % cfg.OUTPUT_DIR, 'w') as file:
        for k, v in res_dict.items():
            writer_string = "%s,{%s,%s,%s,%s,%s,%s,%s,%s,%s,%s}\n" % (
                k, v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7], v[8], v[9])
            file.write(writer_string)
    # removed redundant file.close(): the with-block already closed the file
    def compute(self, save_dir):  # called after each epoch
        """Compute the query-gallery distance matrix and write a
        ``track2.txt`` submission (1-based indices of the 100 nearest
        gallery images per query) into ``save_dir``.

        Returns (distmat, query image names, gallery image names, qf, gf).
        """
        feats = torch.cat(self.feats, dim=0)
        if self.feat_norm:
            print("The test feature is normalized")
            feats = torch.nn.functional.normalize(feats, dim=1,
                                                  p=2)  # along channel
        # query
        qf = feats[:self.num_query]
        # gallery
        gf = feats[self.num_query:]

        img_name_q = self.img_path_list[:self.num_query]
        img_name_g = self.img_path_list[self.num_query:]
        gallery_tids = np.asarray(self.tids[self.num_query:])

        if self.reranking_track:
            print('=> Enter track reranking')
            # distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
            # track_ranking operates on numpy arrays, so move off the GPU
            qf = qf.cpu().numpy()
            gf = gf.cpu().numpy()
            distmat = self.track_ranking(qf, gf, gallery_tids,
                                         self.unique_tids)
        elif self.reranking:
            print('=> Enter reranking')
            # distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
            distmat = re_ranking(qf, gf, k1=50, k2=15, lambda_value=0.3)
        else:
            print('=> Computing DistMat with cosine similarity')
            distmat = cosine_similarity(qf, gf)

        sort_distmat_index = np.argsort(distmat, axis=1)
        print(sort_distmat_index.shape, 'sort_distmat_index.shape')
        print(sort_distmat_index, 'sort_distmat_index')
        # NOTE(review): the writer assumes every row has at least 100
        # gallery entries — confirm the gallery size is >= 100.
        with open(os.path.join(save_dir, 'track2.txt'), 'w') as f:
            for item in sort_distmat_index:
                for i in range(99):
                    f.write(str(item[i] + 1) + ' ')
                f.write(str(item[99] + 1) + '\n')
        print('writing result to {}'.format(
            os.path.join(save_dir, 'track2.txt')))

        return distmat, img_name_q, img_name_g, qf, gf
    def infer(self, query_feat, thres=0.6, reranking=True, rerank_range=50):
        """Look up gallery entries similar to ``query_feat``.

        Args:
            query_feat: query feature tensor compared against the gallery.
            thres: minimum cosine similarity for a hit to be reported.
            reranking: if True, re-rank the top candidates with re_ranking.
            rerank_range: number of top cosine candidates fed to re-ranking.

        Returns:
            List of result dicts (idx, cam_id, img_id, dist, ranking, and
            rerank_dist when re-ranking is enabled), best candidate first.
            An empty list when the gallery table is empty.
        """
        # cosine similarity against the whole gallery
        try:
            dist_mat = torch.nn.functional.cosine_similarity(
                query_feat, self.all_gal_feat).cpu().numpy()
        except RuntimeError:
            print('Table is empty.')
            return []
        # best-first candidate indices, limited to the re-rank window
        indices = np.argsort(
            dist_mat)[::-1][:
                            rerank_range]  #to test if use 50 or use all better

        rerank_dist_ind = None  # per-candidate re-ranked distances (set below)
        if reranking:
            candidate_gal_feat = torch.index_select(
                self.all_gal_feat, 0,
                torch.tensor([indices]).cuda(0)[0])
            rerank_dist = re_ranking(query_feat,
                                     candidate_gal_feat,
                                     k1=30,
                                     k2=6,
                                     lambda_value=0.3)[0]
            # reorder candidates by ascending re-ranked distance
            rerank_idx = np.argsort(1 - rerank_dist)[::-1]
            indices = np.array([indices[i] for i in rerank_idx])
            rerank_dist_ind = np.array([rerank_dist[i] for i in rerank_idx])

        all_result = []
        for idx, i in enumerate(indices):
            if dist_mat[i] < thres:
                continue

            tmp = dict()
            tmp['idx'] = i
            tmp['cam_id'] = self.all_cam_id[i]
            tmp['img_id'] = self.all_img_id[i]
            tmp['dist'] = dist_mat[i]
            # BUGFIX: the original read rerank_dist_ind unconditionally and
            # raised NameError whenever reranking=False.
            if rerank_dist_ind is not None:
                tmp['rerank_dist'] = rerank_dist_ind[idx]
            tmp['ranking'] = idx

            all_result.append(tmp)
        return all_result
 def compute(self, name, K, height):  # called after each epoch
     """Re-rank query vs. gallery features, save the distance matrix to
     ``dis/`` and a rank-1 camid submission CSV to ``submit/``.

     NOTE(review): 'dis/' and 'submit/' directories must already exist.
     NOTE(review): in the sibling examples re_ranking already returns a
     query-by-gallery matrix, yet this method slices
     ``distmat[:num_query, num_query:]`` as if it were a joint matrix —
     confirm which shape this project's re_ranking returns.
     """
     feats = torch.cat(self.feats, dim=0)
     if self.feat_norm:
         print("The test feature is normalized")
         feats = torch.nn.functional.normalize(feats, dim=1,
                                               p=2)  # along channel
     # query
     qf = feats[:self.num_query]
     # dis = qf.mm(qf.t())
     # top = dis.topk(10)[1]
     # new = torch.zeros((qf.shape[0], qf.shape[1])).cuda()
     # for i in range(qf.shape[0]):
     #     new[i] = (9 * qf[i] + qf[top[i][1:]].sum(-2)) / 18
     # qf = new
     q_pids = np.asarray(self.pids[:self.num_query])
     q_camids = np.asarray(self.camids[:self.num_query])
     # gallery
     gf = feats[self.num_query:]
     g_pids = np.asarray(self.pids[self.num_query:])
     g_camids = np.asarray(self.camids[self.num_query:])
     # persist gallery camids for later offline analysis
     pickle.dump(g_camids, open('g_camids.pkl', 'wb'))
     distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
     distmat = torch.tensor(distmat)
     torch.save(
         distmat,
         'dis/' + str(name) + '_' + str(K) + '_' + str(height) + '.pth')
     distmat = distmat[:self.num_query, self.num_query:]
     # rank-1 gallery index per query (smallest distance)
     rank = distmat.topk(1, largest=False)[1]
     rank_list = []
     for num in rank:
         rank_list.append(g_camids[num])
     rank_list = np.array(rank_list)
     # message below is Chinese for "submission file ... saved"
     print('提交文件' + 'submit_' + str(name) + '_' + str(K) + '_' +
           str(height) + '.csv已保存')
     np.savetxt('submit/submit_' + str(name) + '_' + str(K) + '_' +
                str(height) + '.csv',
                rank_list,
                delimiter='\n',
                fmt='%s')
# ---- Example 8 ----
    def compute(self,
                reranking_parameter=(20, 6, 0.3)):  # called after each epoch
        """Rank gallery images for every query image.

        Args:
            reranking_parameter: (k1, k2, lambda_value) for ``re_ranking``.
                The default is a tuple rather than a list so the shared
                default object cannot be mutated across calls (list callers
                still work — only indexing is used).

        Returns:
            (data, distmat, q_path, g_path) where ``data`` maps each query
            path to its top ``self.max_rank`` gallery paths (as strings).
        """
        feats = torch.cat(self.feats, dim=0)
        if self.feat_norm:
            print("The test feature is normalized")
            feats = torch.nn.functional.normalize(feats, dim=1,
                                                  p=2)  # along channel
        # query
        qf = feats[:self.num_query]
        q_path = self.img_name_path[:self.num_query]
        # gallery
        gf = feats[self.num_query:]
        g_path = self.img_name_path[self.num_query:]
        if self.reranking:
            print('=> Enter reranking')
            print('k1={}, k2={}, lambda_value={}'.format(
                reranking_parameter[0], reranking_parameter[1],
                reranking_parameter[2]))
            distmat = re_ranking(qf,
                                 gf,
                                 k1=reranking_parameter[0],
                                 k2=reranking_parameter[1],
                                 lambda_value=reranking_parameter[2])

        else:
            print('=> Computing DistMat with cosine similarity')
            distmat = cosine_similarity(qf, gf)
        print(distmat, 'distmat')
        num_q, num_g = distmat.shape
        indices = np.argsort(distmat, axis=1)
        data = dict()
        print(len(q_path), 'self.img_name_q')
        print(len(g_path), 'self.img_name_g')
        for q_idx in range(num_q):
            order = indices[q_idx]  # select one row
            result_query = np.array(g_path)[order[:self.max_rank]]
            data[q_path[q_idx]] = [str(i) for i in result_query]
        return data, distmat, q_path, g_path
# ---- Example 9 ----
    def track_ranking(self, qf, gf, gallery_tids, unique_tids, fic=False):
        """Distance matrix at track granularity.

        Gallery features are aggregated per track id with weights inversely
        proportional to their distance to the query set, re-ranked against
        the queries, adjusted by precomputed camera/view distances, and the
        per-track distance is broadcast back to every gallery image of that
        track.

        Cleanup vs. the original: removed dead code (duplicate ``m, n``
        assignment, a ``np.zeros`` track_gf that was immediately replaced by
        a list, and an unused ``temp_gf``); behavior is unchanged.
        ``unique_tids`` and ``fic`` are kept for interface compatibility but
        are not used.

        Returns:
            (dist, origin_track_dist): image-level and track-level distance
            matrices.
        """
        origin_dist = euclidean_distance(qf, gf)
        m, n = qf.shape[0], gf.shape[0]
        gallery_tids = np.asarray(gallery_tids)
        dist = np.zeros((m, n))
        gf_tids = sorted(set(gallery_tids))
        track_gf = []
        for tid in gf_tids:
            temp_dist = origin_dist[:, gallery_tids == tid]
            temp_min = np.min(temp_dist, axis=1)
            # use only gallery images reasonably close to some query ...
            index = np.where(temp_min < 0.6)[0]
            if len(index) < 1:
                # ... falling back to the single closest one
                index = np.where(temp_min == np.min(temp_min))[0]
            # inverse-distance weights, normalized to sum to 1
            weight = temp_dist[index, :].mean(axis=0)
            weight = 1.0 / (weight + 0.01)
            weight = weight / np.sum(weight)
            weight = torch.tensor(weight).cuda().unsqueeze(0)
            track_gf.append(torch.mm(weight, gf[gallery_tids == tid, :]))
        track_gf = torch.cat(track_gf)
        origin_track_dist = re_ranking(qf,
                                       track_gf,
                                       k1=7,
                                       k2=2,
                                       lambda_value=0.6)

        # precomputed camera/view re-ranking penalties (must exist on disk)
        cam_dist = np.load('./track_cam_rk.npy')
        view_dist = np.load('./track_view_rk.npy')
        track_dist = origin_track_dist - 0.1 * cam_dist - 0.05 * view_dist

        # broadcast each track's distance to all its gallery images
        for i, tid in enumerate(gf_tids):
            dist[:, gallery_tids == tid] = track_dist[:, i:(i + 1)]
        return dist, origin_track_dist
# ---- Example 10 ----
    def compute(self, save_dir):  # called after each epoch
        """Return the query/gallery distance matrix with image names and
        features.

        ``save_dir`` is accepted for interface compatibility but not used
        by this variant.
        """
        merged = torch.cat(self.feats, dim=0)
        if self.feat_norm:
            print("The test feature is normalized")
            # L2-normalize along the channel dimension
            merged = torch.nn.functional.normalize(merged, p=2, dim=1)

        nq = self.num_query
        # query / gallery splits
        qf, gf = merged[:nq], merged[nq:]
        img_name_q = self.img_path_list[:nq]
        img_name_g = self.img_path_list[nq:]
        gallery_tids = np.asarray(self.tids[nq:])  # kept for parity; unused

        if self.reranking:
            print('=> Enter reranking')
            distmat = re_ranking(qf, gf, k1=50, k2=15, lambda_value=0.3)
        else:
            print('=> Computing DistMat with cosine similarity')
            distmat = cosine_similarity(qf, gf)

        return distmat, img_name_q, img_name_g, qf, gf
# ---- Example 11 ----
def test_rerank(model,
                queryloader,
                galleryloader,
                batch_size,
                use_gpu,
                ranks=[1, 5, 10],
                return_distmat=False):
    """Evaluate ``model`` with k-reciprocal re-ranking.

    Extracts features for both loaders, re-ranks the query-gallery
    distances, then reports mAP and the CMC curve at ``ranks``.
    Returns (rank-1 accuracy, distance matrix).
    """
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        # ---- query features ----
        qf, q_pids, q_camids = [], [], []
        for imgs, pids, camids, _ in queryloader:
            if use_gpu:
                imgs = imgs.cuda()

            tic = time.time()
            feats = model(imgs)
            batch_time.update(time.time() - tic)

            qf.append(feats.data.cpu())
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids, q_camids = np.asarray(q_pids), np.asarray(q_camids)

        print('Extracted features for query set, obtained {}-by-{} matrix'.
              format(qf.size(0), qf.size(1)))

        # ---- gallery features ----
        gf, g_pids, g_camids = [], [], []
        for imgs, pids, camids, _ in galleryloader:
            if use_gpu:
                imgs = imgs.cuda()

            tic = time.time()
            feats = model(imgs)
            batch_time.update(time.time() - tic)

            gf.append(feats.data.cpu())
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids, g_camids = np.asarray(g_pids), np.asarray(g_camids)

        print('Extracted features for gallery set, obtained {}-by-{} matrix'.
              format(gf.size(0), gf.size(1)))

    print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(
        batch_time.avg, batch_size))

    m, n = qf.size(0), gf.size(0)

    # k-reciprocal re-ranking instead of a plain Euclidean distance matrix
    distmat = re_ranking(qf, gf, k1=50, k2=15, lambda_value=0.3)
    print('Computing CMC and mAP')
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, 10)

    print('Results ----------')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in ranks:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
    print('------------------')

    return cmc[0], distmat
# ---- Example 12 ----
    def forward(self, U_src, U_tgt, kf_gate, reid_thr, iou, start_src, end_src,
                start_tgt, end_tgt, seq_name, inverse_flag):
        """Associate source/target detections via cross-graph GCN matching.

        Refines both vertex embedding sets with a cross-graph GCN, builds a
        re-ranked vertex similarity map, solves a relaxed quadratic program
        (cvxpy + SCS) for assignment scores, and greedily extracts
        one-to-one matches.

        Returns:
            output: ndarray of matched index pairs (column orientation
                depends on ``inverse_flag``).
            thr_flag: 0/1 tensor marking pairs vetoed by the Kalman-filter
                gate, zero IoU, or similarity below ``reid_thr``.
        """

        # different aggregation weights in sequences taken by static/moving camera
        # NOTE(review): if seq_name is in neither cfg.DATA.STATIC nor
        # cfg.DATA.MOVING, Mp0 is never bound and the bmm below raises
        # NameError — confirm the two lists cover every sequence.
        if seq_name in cfg.DATA.STATIC:
            Mp0 = torch.matmul(U_src.transpose(1, 2), U_tgt) + iou.unsqueeze(0)
        elif seq_name in cfg.DATA.MOVING:
            Mp0 = torch.matmul(U_src.transpose(1, 2), U_tgt)

        emb1, emb2 = U_src.transpose(1, 2), U_tgt.transpose(1, 2)

        # Cross-graph GCN: each side aggregates the other side's embeddings,
        # rescaled (lambda_*) so the message keeps the original row norms.
        # NOTE(review): the repeat(1, 1, 512) hard-codes a 512-dim embedding.
        m_emb1 = torch.bmm(Mp0, emb2)
        m_emb2 = torch.bmm(Mp0.transpose(1, 2), emb1)
        lambda_1 = (
            torch.norm(emb1, p=2, dim=2, keepdim=True).repeat(1, 1, 512) /
            torch.norm(m_emb1, p=2, dim=2, keepdim=True).repeat(1, 1, 512))
        lambda_2 = (
            torch.norm(emb2, p=2, dim=2, keepdim=True).repeat(1, 1, 512) /
            torch.norm(m_emb2, p=2, dim=2, keepdim=True).repeat(1, 1, 512))
        emb1_new = F.relu(self.cross_graph(emb1 + lambda_1 * m_emb1))
        emb2_new = F.relu(self.cross_graph(emb2 + lambda_2 * m_emb2))

        emb1_new = F.normalize(emb1_new.squeeze(0), p=2, dim=1).unsqueeze(0)
        emb2_new = F.normalize(emb2_new.squeeze(0), p=2, dim=1).unsqueeze(0)

        # calculate the vertex-vertex similarity and edge-edge similarity
        Mp_before = torch.matmul(emb1_new, emb2_new.transpose(1, 2)).squeeze(0)
        # reranking: similarity = 1 - re-ranked distance
        Mp = torch.Tensor(
            1.0 - re_ranking(Mp_before,
                             emb1_new.squeeze(0) @ emb1_new.squeeze(0).t(),
                             emb2_new.squeeze(0) @ emb2_new.squeeze(0).t(),
                             k1=1,
                             k2=1))

        # column-major flattening of Mp into the QP's linear score vector
        Mpp = Mp.transpose(0, 1).reshape(Mp.shape[0] *
                                         Mp.shape[1]).unsqueeze(0).t()
        # degenerate 1x1 case: skip the QP, return threshold flags only
        if Mp.shape[0] == 1 and Mp.shape[1] == 1:
            thr_flag = torch.Tensor(Mp.shape[0], Mp.shape[1]).zero_()
            for i in range(Mp.shape[0]):
                for j in range(Mp.shape[1]):
                    if kf_gate[i][j] == -1 or iou[i][
                            j] == 0 or Mp_before[i][j] < reid_thr:
                        thr_flag[i][j] = 1
            return np.array([0, 0]), thr_flag

        # edge-edge term: endpoint pairs selected via Kronecker-built indices
        kro_one_src = torch.ones(emb1_new.shape[1], emb1_new.shape[1])
        kro_one_tgt = torch.ones(emb2_new.shape[1], emb2_new.shape[1])
        mee1 = self.kronecker(kro_one_tgt, start_src).long()
        mee2 = self.kronecker(kro_one_tgt, end_src).long()
        mee3 = self.kronecker(start_tgt, kro_one_src).long()
        mee4 = self.kronecker(end_tgt, kro_one_src).long()
        src = torch.cat([
            emb1_new.squeeze(0).unsqueeze(1).repeat(1, emb1_new.shape[1], 1),
            emb1_new.repeat(emb1_new.shape[1], 1, 1)
        ],
                        dim=2)
        tgt = torch.cat([
            emb2_new.squeeze(0).unsqueeze(1).repeat(1, emb2_new.shape[1], 1),
            emb2_new.repeat(emb2_new.shape[1], 1, 1)
        ],
                        dim=2)
        src_tgt = (src.reshape(-1, 1024) @ tgt.reshape(-1, 1024).t()).reshape(
            emb1_new.shape[1], emb1_new.shape[1], emb2_new.shape[1],
            emb2_new.shape[1])
        # mask out degenerate edges (identical endpoints on either side)
        mask = ((mee1 - mee2).bool() & (mee3 - mee4).bool()).float()
        M = src_tgt[mee1, mee2, mee3, mee4] / 2
        M = mask * M
        M = M.unsqueeze(0)
        k = (Mp.shape[0] - 1) * (Mp.shape[1] - 1)
        M[0] = k * torch.eye(M.shape[1], M.shape[2]) - M[0]
        if Mp.shape[0] == 1 or Mp.shape[1] == 1:
            M[0] = torch.zeros_like(M[0])
            print('single')
        else:
            # NOTE(review): torch.cholesky is deprecated in recent PyTorch
            # (use torch.linalg.cholesky) — confirm the pinned torch version.
            M[0] = torch.cholesky(M[0])

        # solve relaxed quadratic programming
        # NOTE(review): only the > and == branches exist; if
        # Mp.shape[0] < Mp.shape[1], ``s`` is never bound and the greedy
        # matching below raises NameError — confirm callers guarantee
        # src count >= tgt count (or pass inverse_flag accordingly).
        if Mp.shape[0] > Mp.shape[1]:
            n, m, p = M.shape[1], Mp.shape[1], Mp.shape[0]
            a = np.zeros((p, n))
            b = np.zeros((m, n))
            for i in range(p):
                for j in range(m):
                    a[i][j * p + i] = 1
            for i in range(m):
                b[i][i * p:(i + 1) * p] = 1
            x = cp.Variable(n)
            obj = cp.Minimize(0.5 * cp.sum_squares(M.squeeze(0).numpy() @ x) -
                              Mpp.numpy().T @ x)
            cons = [a @ x <= 1, b @ x == 1, x >= 0]
            prob = cp.Problem(obj, cons)
            prob.solve(solver=cp.SCS, gpu=True, use_indirect=True)
            s = torch.tensor(x.value)
            s = s.reshape(Mp.shape[1], Mp.shape[0]).t().unsqueeze(0)
            # clamp scores into [0, 1]
            s = torch.relu(s) - torch.relu(s - 1)
        elif Mp.shape[0] == Mp.shape[1]:
            n, m, p = M.shape[1], Mp.shape[0], Mp.shape[1]
            x = cp.Variable(n)
            a = np.zeros((m + p, n))
            for i in range(p):
                for j in range(m):
                    a[i][j * p + i] = 1
            for i in range(m):
                a[i + p][i * p:(i + 1) * p] = 1
            obj = cp.Minimize(0.5 * cp.sum_squares(M.squeeze(0).numpy() @ x) -
                              Mpp.numpy().T @ x)
            cons = [a @ x == 1, x >= 0]
            prob = cp.Problem(obj, cons)
            prob.solve(solver=cp.SCS, gpu=True, use_indirect=True)
            s = torch.tensor(x.value)
            s = s.reshape(Mp.shape[1], Mp.shape[0]).t().unsqueeze(0)
            s = torch.relu(s) - torch.relu(s - 1)

        # thresholds
        thr_flag = torch.Tensor(Mp.shape[0], Mp.shape[1]).zero_()
        for i in range(Mp.shape[0]):
            for j in range(Mp.shape[1]):
                if kf_gate[i][j] == -1 or iou[i][
                        j] == 0 or Mp_before[i][j] < reid_thr:
                    thr_flag[i][j] = 1

        # greedy matching from matching score map
        # NOTE(review): if s.shape[1] < s.shape[2], ``output`` is never
        # bound and the return raises NameError — confirm unreachable.
        if s.shape[1] >= s.shape[2]:
            s = s.squeeze(0).t()
            s = np.array(s)
            n = min(s.shape)
            Y = s.copy()
            Z = np.zeros(Y.shape)
            replace = np.min(Y) - 1
            # repeatedly take the global best score and knock out its row/col
            for i in range(n):
                z = np.unravel_index(np.argmax(Y), Y.shape)
                Z[z] = 1
                Y[z[0], :] = replace
                Y[:, z[1]] = replace
            match_tra = np.argmax(Z, 1)
            match_tra = torch.tensor(match_tra)
            if inverse_flag == False:
                output = np.array(
                    torch.cat([
                        match_tra.unsqueeze(0),
                        torch.arange(len(match_tra)).unsqueeze(0)
                    ], 0)).T
            if inverse_flag == True:
                thr_flag = thr_flag.t()
                output = np.array(
                    torch.cat([
                        torch.arange(len(match_tra)).unsqueeze(0),
                        match_tra.unsqueeze(0)
                    ], 0)).T

        return output, thr_flag
# ---- Example 13 ----
    def compute(self,
                reranking_parameter=(20, 6, 0.3)):  # called after each epoch
        """Rank gallery images for every query, optionally ensembling a PCB
        (part-based) distance matrix with the global-feature distances.

        The PCB path is controlled by cfg.MODEL.IF_USE_PCB and the
        cfg.TEST.PCB_GLOBAL_FEAT_ENSEMBLE / USE_PCB_MERGE_FEAT / USE_LOCAL
        switches.

        Args:
            reranking_parameter: (k1, k2, lambda_value) for ``re_ranking``.
                A tuple (not a list) so the shared default cannot be
                mutated across calls; list callers still work.

        Returns:
            (data, distmat, q_path, g_path) where ``data`` maps each query
            path to its top ``self.max_rank`` gallery paths (as strings).
        """
        feats = torch.cat(self.feats, dim=0)

        # If PCB global-feature ensembling is enabled, slice its features now.
        if self.cfg.MODEL.IF_USE_PCB:
            if self.cfg.TEST.PCB_GLOBAL_FEAT_ENSEMBLE:
                pcb_feats = torch.cat(self.pcb_feat, dim=0)
                pcb_qf = pcb_feats[:self.num_query]
                pcb_gf = pcb_feats[self.num_query:]

        if self.feat_norm:
            print("The test feature is normalized")
            feats = torch.nn.functional.normalize(feats, dim=1,
                                                  p=2)  # along channel
        # query
        qf = feats[:self.num_query]
        q_path = self.img_name_path[:self.num_query]
        # gallery
        gf = feats[self.num_query:]
        g_path = self.img_name_path[self.num_query:]

        k1 = reranking_parameter[0]
        k2 = reranking_parameter[1]
        lambda_value = reranking_parameter[2]
        if self.reranking:
            print('=> Enter reranking')
            print('k1={}, k2={}, lambda_value={}'.format(k1, k2,
                                                         lambda_value))
            distmat = re_ranking(qf, gf, k1=k1, k2=k2,
                                 lambda_value=lambda_value)
            # release GPU memory before the (expensive) PCB re-rankings
            qf = qf.cpu()
            gf = gf.cpu()
            torch.cuda.empty_cache()
            if self.cfg.MODEL.IF_USE_PCB:
                if self.cfg.TEST.PCB_GLOBAL_FEAT_ENSEMBLE:
                    pcb_distmat = re_ranking(pcb_qf, pcb_gf, k1=k1, k2=k2,
                                             lambda_value=lambda_value)
                elif self.cfg.TEST.USE_PCB_MERGE_FEAT:
                    pcb_feats = torch.cat(self.pcb_feat, dim=0)
                    pcb_qf = pcb_feats[:self.num_query]
                    pcb_gf = pcb_feats[self.num_query:]
                    pcb_distmat = re_ranking(pcb_qf, pcb_gf, k1=k1, k2=k2,
                                             lambda_value=lambda_value)
                else:
                    # Sum the re-ranked distances over the six PCB part
                    # splits.
                    # BUGFIX: the original computed the split-3 distance
                    # from pcb_feats0 (copy-paste error), double-counting
                    # split 0 and dropping split 3 entirely.
                    pcb_distmat = np.zeros_like(distmat)
                    for split in (self.pcb_feat_split0, self.pcb_feat_split1,
                                  self.pcb_feat_split2, self.pcb_feat_split3,
                                  self.pcb_feat_split4,
                                  self.pcb_feat_split5):
                        part_feats = torch.cat(split, dim=0)
                        part_qf = part_feats[:self.num_query]
                        part_gf = part_feats[self.num_query:]
                        pcb_distmat = pcb_distmat + re_ranking(
                            part_qf,
                            part_gf,
                            k1=k1,
                            k2=k2,
                            lambda_value=lambda_value)
        else:
            print('=> Computing DistMat with cosine similarity')
            distmat = cosine_similarity(qf, gf)
            if self.cfg.MODEL.IF_USE_PCB:
                if self.cfg.TEST.PCB_GLOBAL_FEAT_ENSEMBLE:
                    pcb_distmat = cosine_similarity(pcb_qf, pcb_gf)
                else:
                    # sum cosine distances over the stored PCB part features
                    pcb_distmat = np.zeros_like(distmat)
                    for pcb_feat in self.pcb_feat:
                        pcb_feats = torch.cat(pcb_feat, dim=0)
                        pcb_qf = pcb_feats[:self.num_query]
                        pcb_gf = pcb_feats[self.num_query:]
                        pcb_distmat = pcb_distmat + cosine_similarity(
                            pcb_qf, pcb_gf)
        if self.cfg.MODEL.IF_USE_PCB:
            if self.cfg.TEST.USE_LOCAL:
                distmat = distmat + pcb_distmat
        print(distmat, 'distmat')
        num_q, num_g = distmat.shape
        indices = np.argsort(distmat, axis=1)
        data = dict()
        # BUGFIX: the original printed the gallery count with the query
        # label and vice versa; the counts now match their labels.
        print(len(q_path), 'self.img_name_q')
        print(len(g_path), 'self.img_name_g')
        for q_idx in range(num_q):
            order = indices[q_idx]  # select one row
            result_query = np.array(g_path)[order[:self.max_rank]]
            data[q_path[q_idx]] = [str(i) for i in result_query]
        return data, distmat, q_path, g_path
Esempio n. 14
0
    def compute(self):  # called after each epoch
        # NOTE(review): this variant ignores the features accumulated in
        # self.feats and instead reloads cached features/labels from
        # hard-coded absolute paths, overwriting self.num_query with a
        # fixed value -- an offline-evaluation/debug hack. Confirm before
        # using outside that setting.
        # feats = torch.cat(self.feats, dim=0)
        print(self.num_query)
        # torch.save(feats,"feats")
        # np.save('pid',self.pids)
        # np.save('camid',self.camids)
        feats = torch.load('/home/lab3/bi/0816_MGN/dmt/utils/ibn50_feats')
        self.num_query = 41574
        self.pids = np.load('/home/lab3/bi/0816_MGN/dmt/utils/ibn50_pid.npy')
        self.camids = np.load(
            '/home/lab3/bi/0816_MGN/dmt/utils/ibn50_camid.npy')

        def _maybe_normalize(tensor):
            # L2-normalize along the channel dimension when enabled.
            if self.feat_norm:
                print("The test feature is normalized")
                tensor = torch.nn.functional.normalize(tensor, dim=1, p=2)
            return tensor

        feats = _maybe_normalize(feats)

        if self.pca:
            print("=> AUTO PCA")
            feats = _maybe_normalize(pca_whiten(feats))

        # Database-side augmentation (DBA), then re-normalize.
        if self.dba:
            print('=> Enter DBA')
            feats = _maybe_normalize(database_aug(feats, top_k=9))

        # First num_query rows are queries, the rest is the gallery.
        split = self.num_query
        qf = feats[:split]
        gf = feats[split:]
        q_pids = np.asarray(self.pids[:split])
        q_camids = np.asarray(self.camids[:split])
        g_pids = np.asarray(self.pids[split:])
        g_camids = np.asarray(self.camids[split:])

        if self.reranking:
            print('=> Enter reranking')
            distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
        else:
            print('=> Computing DistMat with cosine similarity')
            distmat = cosine_similarity(qf, gf)

        # Persist the distance matrix for later offline analysis.
        np.save('dismat_rbn50_448.npy', distmat)

        cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)

        return cmc, mAP, distmat, self.pids, self.camids, qf, gf
lambda_list = [0.1, 0.2, 0.3, 0.4]

best_queue = []
MAX_QUEUE_LEN = 10
for k1 in k1_list:
    for k2 in k2_list:
        if k2 > 0.7 * k1:
            break
        for lam in lambda_list:
            print(
                '======================================================================='
            )
            print(best_queue)
            distmat_reranking = re_ranking(qf,
                                           gf,
                                           k1=k1,
                                           k2=k2,
                                           lambda_value=lam)
            cmc, mAP = eval_func(distmat_reranking, q_pids, g_pids, q_camids,
                                 g_camids)
            print('Processing k1:{}, k2:{}, k3:{}'.format(k1, k2, lam))
            print('mAP:{}, rank1:{}'.format(mAP, cmc[0]))
            score = mAP + cmc[0]
            if len(best_queue) <= MAX_QUEUE_LEN:
                heapq.heappush(best_queue, (score, (k1, k2, lam, mAP, cmc[0])))
            else:
                if score > best_queue[0][0]:
                    heapq.heappop(best_queue)
                    heapq.heappush(best_queue,
                                   (score, (k1, k2, lam, mAP, cmc[0])))
                else:
Esempio n. 16
0
def train(**kwargs):
    """Self-training (pseudo-label) domain adaptation of a PCB re-id model.

    Each iteration: extract source/target features, re-rank their distance
    matrix, cluster it with DBSCAN to pseudo-label the target training
    images, then fine-tune the model on the pseudo-labelled set with a
    triplet loss. Checkpoints and periodic evaluations are written to
    ``opt.save_dir``.

    Args:
        **kwargs: option overrides forwarded to ``opt._parse``.

    Raises:
        RuntimeError: when no pre-trained model is supplied, or the supplied
            checkpoint does not fit the PCB architecture.
    """
    opt._parse(kwargs)
    #opt.lr=0.00002
    opt.model_name = 'PCB'
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)
    tgt_dataset = data_manager.init_dataset(name=opt.tgt_dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    tgt_trainloader = DataLoader(
        ImageData(tgt_dataset.train, TrainTransform(opt.datatype)),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    tgt_queryloader = DataLoader(
        ImageData(tgt_dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    tgt_galleryloader = DataLoader(
        ImageData(tgt_dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )
    tgt_queryFliploader = DataLoader(
        ImageData(tgt_dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    tgt_galleryFliploader = DataLoader(
        ImageData(tgt_dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    print('initializing model ...')
    model = PCB(dataset.num_train_pids)

    optim_policy = model.get_optim_policy()

    start_epoch = opt.start_epoch

    if opt.pretrained_model:
        checkpoint = torch.load(opt.pretrained_model)
        state_dict = checkpoint['state_dict']

        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        try:
            model.load_state_dict(state_dict, False)
            print('load pretrained model ' + opt.pretrained_model)
        except RuntimeError as e:
            # BUG FIX: the original built a RuntimeError without raising it
            # inside a bare ``except:``, silently continuing with a
            # half-loaded model.
            raise RuntimeError(
                'please keep the same size with source dataset..') from e
    else:
        raise RuntimeError('please load a pre-trained model...')

    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        # Evaluation-only mode: score the target set and return.
        print('transfer directly....... ')
        reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader,
                                tgt_queryFliploader, tgt_galleryFliploader,
                                re_ranking=opt.re_ranking, savefig=opt.savefig)
        return

    #xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)

    embedding_criterion = SelfTraining_TripletLoss(margin=0.5, num_instances=4)

    def criterion(triplet_y, softmax_y, labels):
        # Only the triplet branch contributes; ``softmax_y`` is accepted for
        # interface compatibility with the trainer but intentionally unused.
        losses = [torch.sum(torch.stack(
            [embedding_criterion(output, labels) for output in triplet_y]))]
        loss = sum(losses)
        return loss

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy, lr=opt.lr, momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy, lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    # get trainer and evaluator
    # NOTE(review): this instance is shadowed by the per-iteration
    # ``trainer`` below and never used; kept to preserve any constructor
    # side effects (e.g. summary-writer registration).
    reid_trainer = PCBTrainer(opt, model, optimizer, criterion, summary_writer)

    def adjust_lr(optimizer, ep):
        # Warm-up then step decay.  NOTE(review): defined but never called
        # in this function -- confirm whether it should run per iteration.
        if ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr * 10
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr * 0.1
        for p in optimizer.param_groups:
            p['lr'] = lr

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0

    # Baseline: evaluate the source-trained model on the target directly.
    print('transfer directly.....')
    reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader,
                            tgt_queryFliploader, tgt_galleryFliploader,
                            re_ranking=opt.re_ranking, savefig=opt.savefig)

    for iter_n in range(start_epoch, opt.max_epoch):
        if opt.lambda_value == 0:
            # Source features unused when lambda is zero.
            source_features = 0
        else:
            # get source datas' feature
            print('Iteration {}: Extracting Source Dataset Features...'.format(
                iter_n + 1))
            source_features, _ = extract_pcb_features(model, trainloader)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, _ = extract_pcb_features(model, tgt_trainloader)
        # synchronization feature order with dataset.train

        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(
            source_features, target_features, lambda_value=opt.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster: pick eps as the mean of the smallest ``rho``
            # fraction of pairwise distances (upper triangle only).
            # NOTE(review): ``cluster`` is only created on iteration 0, so
            # resuming with start_epoch > 0 raises NameError -- confirm.
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2, upper triangle
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(opt.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()  # DBSCAN clustering radius
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed',
                             n_jobs=8)

        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        # Free the large intermediates before building the next dataset.
        del rerank_dist
        del source_features
        del target_features
        try:
            gc.collect()
        except Exception:
            print('cannot collect')

        num_ids = len(set(labels)) - 1  # minus one for the DBSCAN noise label
        print('Iteration {} have {} training ids'.format(iter_n + 1, num_ids))
        # generate new pseudo-labelled dataset from the non-noise clusters
        new_dataset = []
        for (fname, _, _), label in zip(tgt_dataset.train, labels):
            if label == -1:
                continue
            # dont need to change codes in trainer.py _parsing_input function
            # and sampler function after add 0
            new_dataset.append((fname, label, 0))
        print('Iteration {} have {} training images'.format(
            iter_n + 1, len(new_dataset)))

        selftrain_loader = DataLoader(
            ImageData(new_dataset, TrainTransform(opt.datatype)),
            sampler=RandomIdentitySampler(new_dataset, opt.num_instances),
            batch_size=opt.train_batch, num_workers=opt.workers,
            pin_memory=pin_memory, drop_last=True
        )

        # train model with new generated dataset
        trainer = PCBTrainer(opt, model, optimizer, criterion, summary_writer)
        reid_evaluator = ResNetEvaluator(model)
        # Start training
        for epoch in range(opt.selftrain_iterations):
            trainer.train(epoch, selftrain_loader)

        # skip if not save model
        if opt.eval_step > 0 and (iter_n + 1) % opt.eval_step == 0 or \
                (iter_n + 1) == opt.max_epoch:
            #  just avoid out of memory during eval,and can't save the model
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': iter_n + 1},
                            is_best=0, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(iter_n + 1) + '.pth.tar')

            # Full evaluation only every eval_step*4 iterations (expensive).
            if (iter_n + 1) % (opt.eval_step * 4) == 0:
                if opt.mode == 'class':
                    rank1 = test(model, tgt_queryloader)
                else:
                    rank1 = reid_evaluator.evaluate(
                        tgt_queryloader, tgt_galleryloader,
                        tgt_queryFliploader, tgt_galleryFliploader)
                is_best = rank1 > best_rank1
                if is_best:
                    best_rank1 = rank1
                    best_epoch = iter_n + 1

                if use_gpu:
                    state_dict = model.module.state_dict()
                else:
                    state_dict = model.state_dict()

                if is_best:
                    save_checkpoint(
                        {'state_dict': state_dict, 'epoch': iter_n + 1},
                        is_best=is_best, save_dir=opt.save_dir,
                        filename='checkpoint_ep' + str(iter_n + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achived at epoch {}'.format(
        best_rank1, best_epoch))
Esempio n. 17
0
def do_inference(cfg, model, val_loader, num_query, query_name, gallery_name):
    """Run test-time inference and write a top-10 retrieval submission.

    Extracts flip-averaged features for every image in ``val_loader``
    (queries are the LAST ``num_query`` rows of the feature matrix),
    builds a query-vs-gallery distance matrix via DB query expansion,
    re-ranking, or a plain similarity matrix, then writes the 10 nearest
    gallery filenames per query to ``submission.csv``.

    Args:
        cfg: config node; reads ``TEST.DB_QE``, ``TEST.RE_RANKING`` and
            ``TEST.RE_RANKING_PARAMETER`` (k1, k2, lambda).
        model: network returning one feature vector per image; moved to
            GPU and wrapped in ``DataParallel`` here.
        val_loader: yields ``(img, pid)`` batches, gallery first then
            queries.
        num_query: number of query images at the tail of the loader.
        query_name / gallery_name: per-image path records; ``[i][0]`` is
            the file path whose basename goes into the CSV.
    """
    time_start = time.time()
    model.eval()
    model.cuda()
    model = nn.DataParallel(model)
    feature = torch.FloatTensor().cuda()
    with torch.no_grad():
        for (img, pid) in val_loader:
            input_img = img.cuda()
            # Horizontal-flip TTA: sum features of image and its mirror.
            input_img_mirror = input_img.flip(dims=[3])
            outputs = model(input_img)
            outputs_mirror = model(input_img_mirror)
            f = outputs + outputs_mirror
            feature = torch.cat((feature, f), 0)
    if cfg.TEST.DB_QE:
        print("进行db_qe过程")
        query_vecs = feature[-num_query:, :].cpu().numpy()
        reference_vecs = feature[:-num_query, :].cpu().numpy()
        query_vecs, reference_vecs, distmat = retrieve(query_vecs,
                                                       reference_vecs)

    elif cfg.TEST.RE_RANKING:
        # BUG FIX: the normalized tensor ``feats`` was computed but the raw
        # ``feature`` tensor was sliced, silently discarding the
        # normalization; slice ``feats`` instead.
        feats = torch.nn.functional.normalize(feature, dim=1, p=2)
        query_vecs = feats[-num_query:, :]
        reference_vecs = feats[:-num_query, :]
        ranking_parameter = cfg.TEST.RE_RANKING_PARAMETER
        k1, k2, lambda_value = ranking_parameter
        distmat = re_ranking(query_vecs,
                             reference_vecs,
                             k1=k1,
                             k2=k2,
                             lambda_value=lambda_value)
    else:
        print("最原始的计算距离过程")
        query_vecs = feature[-num_query:, :].cpu().numpy()
        reference_vecs = feature[:-num_query, :].cpu().numpy()
        print(query_vecs.shape)
        distmat = calculate_sim_matrix(query_vecs, reference_vecs)

    num_q, num_g = distmat.shape
    # Smaller distance = better match; keep the 10 nearest gallery images.
    indices = np.argsort(distmat, axis=1)
    max_10_indices = indices[:, :10]
    res_dict = dict()
    for q_idx in range(num_q):
        filename = query_name[q_idx][0].split("/")[-1]
        max_10_files = [
            gallery_name[i][0].split("/")[-1] for i in max_10_indices[q_idx]
        ]
        res_dict[filename] = max_10_files
    # ``with`` closes the file on exit; the original's extra
    # ``file.close()`` after the block was redundant and was removed.
    with open('submission.csv', 'w') as file:
        for k, v in res_dict.items():
            writer_string = "%s,{%s}\n" % (k, ",".join(v))
            file.write(writer_string)
    time_final = time.time()
    print("总共用时:", time_final - time_start)
Esempio n. 18
0
    def compute(self,
                fic=False,
                fac=False,
                rm_camera=False,
                save_dir='./',
                crop_test=False,
                la=0.18):
        """Build the query/gallery distance matrix and evaluate it.

        Args:
            fic: also compute camera-compensated features via ``run_fic``
                and combine their distances with the plain ones.
            fac: accepted for interface compatibility; unused in this path.
            rm_camera: add a large penalty to same-camera pairs, and to
                pairs on opposite sides of the ``camid >= 40`` split
                (presumably a camera-group boundary -- TODO confirm).
            save_dir: directory for side-output files (``fic.txt``).
            crop_test: features arrive as (original, crop) pairs; sum each
                pair and halve the id/camera/track bookkeeping lists.
            la: interpolation weight forwarded to ``run_fic``.

        Returns:
            (cmc, mAP, distmat, pids, camids, qf, gf); ``cmc``/``mAP`` are
            zeros for the AIC datasets, which carry no ground truth here.
        """
        origin_track_dist = 0
        feats = torch.cat(self.feats, dim=0)
        if crop_test:
            # Fuse each (original, crop) feature pair into one embedding.
            feats = feats[::2] + feats[1::2]
            self.pids = self.pids[::2]
            self.camids = self.camids[::2]
            self.tids = self.tids[::2]
            self.num_query = int(self.num_query / 2)
        if self.feat_norm:
            print("The test feature is normalized")
            feats = torch.nn.functional.normalize(feats, dim=1,
                                                  p=2)  # along channel
        # BUG FIX: the original opened this file and left the handle open
        # and unused for the rest of the method; keep the create/truncate
        # side effect but close the handle immediately.
        with open(os.path.join(save_dir, 'fic.txt'), 'w'):
            pass

        # gallery
        gf = feats[self.num_query:]
        g_pids = np.asarray(self.pids[self.num_query:])
        g_camids = np.asarray(self.camids[self.num_query:])
        gallery_tids = np.asarray(self.tids[self.num_query:])
        # query
        qf = feats[:self.num_query]
        q_pids = np.asarray(self.pids[:self.num_query])
        q_camids = np.asarray(self.camids[:self.num_query])
        if fic:
            qf1, gf1 = run_fic(qf, gf, q_camids, g_camids, la=la)

        if self.reranking_track:
            print('=> Enter track reranking')
            distmat, origin_track_dist = self.track_ranking(
                qf, gf, gallery_tids, self.unique_tids)
            if fic:
                # Average plain and camera-compensated results.
                distmat1, origin_track_dist1 = self.track_ranking(
                    qf1, gf1, gallery_tids, self.unique_tids, fic=True)
                distmat = (distmat + distmat1) / 2.0
                origin_track_dist = 0.5 * origin_track_dist + 0.5 * origin_track_dist1
        elif self.reranking:
            print('=> Enter reranking')
            distmat = re_ranking(qf, gf, k1=50, k2=15, lambda_value=0.3)
            if fic:
                distmat += re_ranking(qf1, gf1, k1=50, k2=15, lambda_value=0.3)
        else:
            print('=> Computing DistMat with euclidean distance')
            distmat = euclidean_distance(qf, gf)
            if fic:
                distmat += euclidean_distance(qf1, gf1)
        if rm_camera:
            # Push same-camera matches far down the ranking.
            cam_matches = (g_camids == q_camids[:,
                                                np.newaxis]).astype(np.int32)
            distmat = distmat + 10.0 * cam_matches
            # Also penalise pairs that straddle the camid-40 split.
            cam_matches = ((g_camids >= 40).astype(np.int32) !=
                           (q_camids[:, np.newaxis] >= 40).astype(
                               np.int32)).astype(np.int32)
            distmat = distmat + 10.0 * cam_matches

        if self.dataset in ['aic', 'aic_sim', 'aic_sim_spgan']:
            # These test sets ship no labels, so metrics cannot be computed.
            cmc = [0.0] * 100
            mAP = 0.0
            print('No evalution!!!!!!!!!!!!!!!!!!!')
        else:
            cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)

        return cmc, mAP, distmat, self.pids, self.camids, qf, gf