Example #1
    def _evaluate(self,
                  epoch,
                  dataset_name='',
                  queryloader=None,
                  galleryloader=None,
                  dist_metric='euclidean',
                  visrank=False,
                  visrank_topk=20,
                  save_dir='',
                  use_metric_cuhk03=False,
                  ranks=[1, 5, 10, 20]):
        batch_time = AverageMeter()

        self.model.eval()

        print('Extracting features from query set ...')
        qf, q_pids, q_camids = [], [], []
        for batch_idx, data in enumerate(queryloader):
            imgs, pids, camids = self._parse_data_for_eval(data)
            if self.use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = self._extract_features(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set ...')
        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, data in enumerate(galleryloader):
            imgs, pids, camids = self._parse_data_for_eval(data)
            if self.use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = self._extract_features(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

        print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

        distmat = metrics.compute_distance_matrix(qf, gf, dist_metric)
        distmat = distmat.numpy()

        print('Computing CMC and mAP ...')
        cmc, mAP = metrics.evaluate_rank(distmat,
                                         q_pids,
                                         g_pids,
                                         q_camids,
                                         g_camids,
                                         use_metric_cuhk03=use_metric_cuhk03)

        print('** Results **')
        print('mAP: {:.1%}'.format(mAP))
        print('CMC curve')
        for r in ranks:
            print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))

        if visrank:
            visualize_ranked_results(
                distmat,
                self.datamanager.return_testdataset_by_name(dataset_name),
                save_dir=osp.join(save_dir, 'visrank-' + str(epoch + 1),
                                  dataset_name),
                topk=visrank_topk)

        return cmc[0]
Example #2
    def _evaluate(self, epoch, dataset_name='', queryloader=None, galleryloader=None,
                  dist_metric='euclidean', normalize_feature=False, visrank=False,
                  visrank_topk=10, save_dir='', use_metric_cuhk03=False, ranks=(1, 5, 10, 20),
                  rerank=False, iteration=0):
        batch_time = AverageMeter()

        print('Extracting features from query set...')
        qf, q_pids, q_camids = [], [], []  # query features, query person IDs and query camera IDs
        for batch_idx, data in tqdm(enumerate(queryloader), 'Processing query...'):
            imgs, pids, camids = self._parse_data_for_eval(data)
            if self.use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = self._extract_features(imgs, data[3])
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set...')
        gf, g_pids, g_camids = [], [], []  # gallery features, gallery person IDs and gallery camera IDs
        for batch_idx, data in tqdm(enumerate(galleryloader), 'Processing gallery...'):
            imgs, pids, camids = self._parse_data_for_eval(data)
            if self.use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = self._extract_features(imgs, data[3])
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

        print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

        if normalize_feature:
            print('Normalizing features with L2 norm...')
            qf = F.normalize(qf, p=2, dim=1)
            gf = F.normalize(gf, p=2, dim=1)

        print('Computing distance matrix with metric={}...'.format(dist_metric))
        distmat = metrics.compute_distance_matrix(qf, gf, dist_metric)
        distmat = distmat.numpy()

        if rerank:
            print('Applying person re-ranking ...')
            distmat_qq = metrics.compute_distance_matrix(qf, qf, dist_metric)
            distmat_gg = metrics.compute_distance_matrix(gf, gf, dist_metric)
            distmat = re_ranking(distmat, distmat_qq, distmat_gg)

        print('Computing CMC and mAP ...')
        cmc, mAP = metrics.evaluate_rank(
            distmat,
            q_pids,
            g_pids,
            q_camids,
            g_camids,
            use_metric_cuhk03=use_metric_cuhk03
        )
        if self.writer is not None:
            self.writer.add_scalar('Val/{}/mAP'.format(dataset_name), mAP, epoch + 1)
            for r in ranks:
                self.writer.add_scalar('Val/{}/Rank-{}'.format(dataset_name, r), cmc[r - 1], epoch + 1)

        print('** Results **')
        print('mAP: {:.2%}'.format(mAP))
        print('CMC curve')
        for r in ranks:
            print('Rank-{:<3}: {:.2%}'.format(r, cmc[r-1]))

        if visrank:
            visualize_ranked_results(
                distmat,
                self.datamanager.return_testdataset_by_name(dataset_name),
                self.datamanager.data_type,
                width=self.datamanager.width,
                height=self.datamanager.height,
                save_dir=osp.join(save_dir, 'visrank_' + dataset_name),
                topk=visrank_topk
            )

        return cmc[0]
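Example #3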
def evaluate(model,
             query_loader=None,
             gallery_loader=None,
             dist_metric='euclidean',
             use_metric_cuhk03=False,
             ranks=[1, 5, 10, 20],
             rerank=False):
    batch_time = avgmeter.AverageMeter()

    def _feature_extraction(data_loader):
        f_, pids_, camids_ = [], [], []
        for batch_idx, data in enumerate(data_loader):
            imgs, pids, camids = _parse_data_for_eval(data)
            end = time.time()
            _, features = model.predict_on_batch(imgs)
            batch_time.update(time.time() - end)
            f_.extend(features.numpy())
            pids_.extend(pids.numpy())
            camids_.extend(camids.numpy())
        f_ = np.stack(f_)
        pids_ = np.stack(pids_)
        camids_ = np.stack(camids_)
        return f_, pids_, camids_

    print('Extracting features from query set ...')
    qf, q_pids, q_camids = _feature_extraction(query_loader)
    print('Done, obtained {}-by-{} matrix'.format(qf.shape[0], qf.shape[1]))

    print('Extracting features from gallery set ...')
    gf, g_pids, g_camids = _feature_extraction(gallery_loader)
    print('Done, obtained {}-by-{} matrix'.format(gf.shape[0], gf.shape[1]))

    print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

    # if normalize_feature:
    #     print('Normalzing features with L2 norm ...')
    #     qf = F.normalize(qf, p=2, dim=1)
    #     gf = F.normalize(gf, p=2, dim=1)

    print(f'Computing distance matrix with metric={dist_metric} ...')
    distmat = compute_distance_matrix(qf, gf, dist_metric)

    if rerank:
        print('Applying person re-ranking ...')
        distmat_qq = compute_distance_matrix(qf, qf, dist_metric)
        distmat_gg = compute_distance_matrix(gf, gf, dist_metric)
        distmat = utils.re_ranking(distmat, distmat_qq, distmat_gg)

    print('Computing CMC and mAP ...')
    cmc, mAP = metrics.evaluate_rank(
        distmat,
        q_pids,
        g_pids,
        q_camids,
        g_camids,
        use_metric_cuhk03=use_metric_cuhk03
    )

    print('** Results **')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in ranks:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))

    # if visrank:
    #     visualize_ranked_results(
    #         distmat,
    #         self.datamanager.
    #             return_query_and_gallery_by_name(dataset_name),
    #         self.datamanager.data_type,
    #         width=self.datamanager.width,
    #         height=self.datamanager.height,
    #         save_dir=osp.join(save_dir, 'visrank_' + dataset_name),
    #         topk=visrank_topk
    #     )

    return cmc[0]
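Example #4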
    def _evaluate(
        self,
        dataset_name='',
        query_loader=None,
        gallery_loader=None,
        dist_metric='euclidean',
        normalize_feature=False,
        visrank=False,
        visrank_topk=10,
        save_dir='',
        use_metric_cuhk03=False,
        ranks=[1, 5, 10, 20],
        rerank=False
    ):
        batch_time = AverageMeter()

        def _feature_extraction(data_loader):
            f_, pids_, camids_ = [], [], []
            for batch_idx, data in enumerate(data_loader):
                imgs, pids, camids = self.parse_data_for_eval(data)
                if self.use_gpu:
                    imgs = imgs.cuda()
                end = time.time()
                features = self.extract_features(imgs)
                batch_time.update(time.time() - end)
                features = features.cpu().clone()
                f_.append(features)
                pids_.extend(pids)
                camids_.extend(camids)
            f_ = torch.cat(f_, 0)
            pids_ = np.asarray(pids_)
            camids_ = np.asarray(camids_)
            return f_, pids_, camids_

        print('Extracting features from query set ...')
        qf, q_pids, q_camids = _feature_extraction(query_loader)
        print(qf.shape)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set ...')
        gf, g_pids, g_camids = _feature_extraction(gallery_loader)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

        print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

        if normalize_feature:
            print('Normalizing features with L2 norm ...')
            qf = F.normalize(qf, p=2, dim=1)
            gf = F.normalize(gf, p=2, dim=1)

        print(
            'Computing distance matrix with metric={} ...'.format(dist_metric)
        )
        distmat = metrics.compute_distance_matrix(qf, gf, dist_metric)
        distmat = distmat.numpy()

        if rerank:
            print('Applying person re-ranking ...')
            distmat_qq = metrics.compute_distance_matrix(qf, qf, dist_metric)
            distmat_gg = metrics.compute_distance_matrix(gf, gf, dist_metric)
            distmat = re_ranking(distmat, distmat_qq, distmat_gg)

        print('Computing CMC and mAP ...')
        cmc, mAP = metrics.evaluate_rank(
            distmat,
            q_pids,
            g_pids,
            q_camids,
            g_camids,
            use_metric_cuhk03=use_metric_cuhk03
        )

        print('** Results **')
        print('mAP: {:.1%}'.format(mAP))
        print('CMC curve')
        for r in ranks:
            print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))

        if visrank:
            visualize_ranked_results(
                distmat,
                self.datamanager.fetch_test_loaders(dataset_name),
                self.datamanager.data_type,
                width=self.datamanager.width,
                height=self.datamanager.height,
                save_dir=osp.join(save_dir, 'visrank_' + dataset_name),
                topk=visrank_topk
            )

        return cmc[0], mAP
Example #5
def evaluate(model,
             queryloader,
             galleryloader,
             dist_metric='euclidean',
             normalize_feature=False,
             rerank=False,
             return_distmat=False):
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        print('Extracting features from query set ...')
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
            imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set ...')
        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
            imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

    print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

    if normalize_feature:
        print('Normalizing features with L2 norm ...')
        qf = F.normalize(qf, p=2, dim=1)
        gf = F.normalize(gf, p=2, dim=1)

    print('Computing distance matrix with metric={} ...'.format(dist_metric))
    distmat = compute_distance_matrix(qf, gf, dist_metric)
    distmat = distmat.numpy()

    if rerank:
        print('Applying person re-ranking ...')
        distmat_qq = compute_distance_matrix(qf, qf, dist_metric)
        distmat_gg = compute_distance_matrix(gf, gf, dist_metric)
        distmat = re_ranking(distmat, distmat_qq, distmat_gg)

    print('Computing CMC and mAP ...')
    cmc, mAP = evaluate_rank(distmat, q_pids, g_pids, q_camids, g_camids)
    print('** Results **')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in [1, 5, 10, 20]:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))

    if return_distmat:
        return distmat

    return cmc[0]
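Example #6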
    def _evaluate_reid(self,
                       model,
                       epoch,
                       dataset_name='',
                       query_loader=None,
                       gallery_loader=None,
                       dist_metric='euclidean',
                       normalize_feature=False,
                       visrank=False,
                       visrank_topk=10,
                       save_dir='',
                       use_metric_cuhk03=False,
                       ranks=(1, 5, 10, 20),
                       rerank=False,
                       model_name='',
                       lr_finder=False):
        def _feature_extraction(data_loader):
            f_, pids_, camids_ = [], [], []
            for _, data in enumerate(data_loader):
                imgs, pids, camids = self.parse_data_for_eval(data)
                if self.use_gpu:
                    imgs = imgs.cuda()

                features = model(imgs)
                features = features.data.cpu()

                f_.append(features)
                pids_.extend(pids)
                camids_.extend(camids)

            f_ = torch.cat(f_, 0)
            pids_ = np.asarray(pids_)
            camids_ = np.asarray(camids_)

            return f_, pids_, camids_

        qf, q_pids, q_camids = _feature_extraction(query_loader)
        gf, g_pids, g_camids = _feature_extraction(gallery_loader)

        if normalize_feature:
            qf = F.normalize(qf, p=2, dim=1)
            gf = F.normalize(gf, p=2, dim=1)

        distmat = metrics.compute_distance_matrix(qf, gf, dist_metric)
        distmat = distmat.numpy()

        if rerank:
            distmat_qq = metrics.compute_distance_matrix(qf, qf, dist_metric)
            distmat_gg = metrics.compute_distance_matrix(gf, gf, dist_metric)
            distmat = re_ranking(distmat, distmat_qq, distmat_gg)

        cmc, mAP = metrics.evaluate_rank(distmat,
                                         q_pids,
                                         g_pids,
                                         q_camids,
                                         g_camids,
                                         use_metric_cuhk03=use_metric_cuhk03)

        if self.writer is not None and not lr_finder:
            self.writer.add_scalar(
                'Val/{}/{}/mAP'.format(dataset_name, model_name), mAP,
                epoch + 1)
            for r in ranks:
                self.writer.add_scalar(
                    'Val/{}/{}/Rank-{}'.format(dataset_name, model_name, r),
                    cmc[r - 1], epoch + 1)
        if not lr_finder:
            print('** Results ({}) **'.format(model_name))
            print('mAP: {:.2%}'.format(mAP))
            print('CMC curve')
            for r in ranks:
                print('Rank-{:<3}: {:.2%}'.format(r, cmc[r - 1]))

        if visrank and not lr_finder:
            visualize_ranked_results(
                distmat,
                self.datamanager.fetch_test_loaders(dataset_name),
                self.datamanager.data_type,
                width=self.datamanager.width,
                height=self.datamanager.height,
                save_dir=osp.join(save_dir, 'visrank_' + dataset_name),
                topk=visrank_topk)

        return cmc[0]
Example #7
    def _evaluate(self, arch, epoch, dataset_name='', queryloader=None, galleryloader=None,
                  dist_metric='euclidean', normalize_feature=False, visrank=False,
                  visrank_topk=20, save_dir='', use_metric_cuhk03=False, ranks=[1, 5, 10, 20],
                  rerank=False, viscam=False, viscam_num=10, viscam_only=False):
        with self.experiment.test():
            if not viscam_only:
                batch_time = AverageMeter()
                combine_time = AverageMeter()

                self.model.eval()

                print('Extracting features from query set ...')
                qf, q_pids, q_camids = [], [], [] # query features, query person IDs and query camera IDs
                for batch_idx, data in enumerate(queryloader):
                    imgs, pids, camids = self._parse_data_for_eval(data)
                    if self.use_gpu:
                        imgs = imgs.cuda()
                    end = time.time()
                    features = self._extract_features(imgs)
                    batch_time.update(time.time() - end, len(pids), True)
                    features = features.data.cpu()
                    qf.append(features)
                    q_pids.extend(pids)
                    q_camids.extend(camids)
                qf = torch.cat(qf, 0)
                q_pids = np.asarray(q_pids)
                q_camids = np.asarray(q_camids)
                print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

                print('Extracting features from gallery set ...')
                gf, g_pids, g_camids = [], [], [] # gallery features, gallery person IDs and gallery camera IDs
                end = time.time()
                for batch_idx, data in enumerate(galleryloader):
                    imgs, pids, camids = self._parse_data_for_eval(data)
                    if self.use_gpu:
                        imgs = imgs.cuda()
                    end = time.time()
                    features = self._extract_features(imgs)
                    batch_time.update(time.time() - end, len(pids), True)
                    features = features.data.cpu()
                    gf.append(features)
                    g_pids.extend(pids)
                    g_camids.extend(camids)
                gf = torch.cat(gf, 0)
                g_pids = np.asarray(g_pids)
                g_camids = np.asarray(g_camids)

                end = time.time()
                num_images = len(g_pids)
                self.combine_fn.train()
                gf, g_pids = self.combine_fn(gf, g_pids, g_camids)
                if self.save_embed:
                    assert osp.isdir(self.save_embed)
                    path = osp.realpath(self.save_embed)
                    np.save(path + '/gf-' + self.combine_method + '.npy', gf)
                    np.save(path + '/g_pids-' + self.combine_method + '.npy', g_pids)
                combine_time.update(time.time() - end, num_images, True)
                gf = torch.tensor(gf, dtype=torch.float)
                print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

                print('Speed: {:.4f} sec/image'.format(batch_time.avg + combine_time.avg))

                if normalize_feature:
                    print('Normalizing features with L2 norm ...')
                    qf = F.normalize(qf, p=2, dim=1)
                    gf = F.normalize(gf, p=2, dim=1)

                print('Computing distance matrix with metric={} ...'.format(dist_metric))
                distmat = metrics.compute_distance_matrix(qf, gf, dist_metric)
                distmat = distmat.numpy()

                if rerank:
                    print('Applying person re-ranking ...')
                    distmat_qq = metrics.compute_distance_matrix(qf, qf, dist_metric)
                    distmat_gg = metrics.compute_distance_matrix(gf, gf, dist_metric)
                    distmat = re_ranking(distmat, distmat_qq, distmat_gg)

                print('Computing CMC and mAP ...')
                cmc, mAP = metrics.evaluate_rank(
                    distmat,
                    q_pids,
                    g_pids,
                    q_camids,
                    g_camids,
                    use_metric_cuhk03=use_metric_cuhk03
                )

                print('** Results **')
                print('mAP: {:.1%}'.format(mAP))
                print('CMC curve')
                for r in ranks:
                    print('Rank-{:<3}: {:.1%}'.format(r, cmc[r-1]))

                # write to Tensorboard and comet.ml
                if not self.test_only:
                    rs = {'eval-rank-{:<3}'.format(r): cmc[r - 1] for r in ranks}
                    self.writer.add_scalars('eval/ranks', rs, epoch)
                    self.experiment.log_metrics(rs, step=epoch)
                    self.writer.add_scalar('eval/mAP', mAP, epoch)
                    self.experiment.log_metric('eval-mAP', mAP, step=epoch)
                    print('Results written to tensorboard and comet.ml.')

            if visrank:
                visualize_ranked_results(
                    distmat,
                    self.datamanager.return_testdataset_by_name(dataset_name),
                    save_dir=osp.join(save_dir, 'visrank-'+str(epoch+1), dataset_name),
                    topk=visrank_topk
                )

            if viscam:
                if arch == 'osnet_x1_0' or arch == 'osnet_custom':
                    # print(self.model)
                    visualize_cam(
                        model=self.model,
                        finalconv='conv5',  # for OSNet
                        dataset=self.datamanager.return_testdataset_by_name(dataset_name),
                        save_dir=osp.join(save_dir, 'viscam-'+str(epoch+1), dataset_name),
                        num=viscam_num
                    )
                elif arch == 'resnext50_32x4d':
                    # print(self.model)
                    visualize_cam(
                        model=self.model,
                        finalconv='layer4',  # for resnext50
                        dataset=self.datamanager.return_testdataset_by_name(dataset_name),
                        save_dir=osp.join(save_dir, 'viscam-'+str(epoch+1), dataset_name),
                        num=viscam_num
                    )
                    
        if viscam_only:
            raise RuntimeError('Stopping execution because `viscam_only` is set to True.')

        return cmc[0]
Example #8
def test(model,
         queryloader,
         galleryloader,
         use_gpu,
         ranks=[1, 5, 10, 20],
         return_distmat=False):

    flip_eval = args.flip_eval

    if flip_eval:
        print('# Using Flip Eval')

    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids, q_paths = [], [], [], []

        if flip_eval:
            enumerator = enumerate(zip(queryloader[0], queryloader[1]))
        else:
            enumerator = enumerate(queryloader[0])

        for batch_idx, package in enumerator:
            end = time.time()

            if flip_eval:
                (imgs0, pids, camids, paths), (imgs1, _, _, _) = package
                if use_gpu:
                    imgs0, imgs1 = imgs0.cuda(), imgs1.cuda()
                features = (model(imgs0)[0] + model(imgs1)[0]) / 2.0
                # print(features.size())
            else:
                (imgs, pids, camids, paths) = package
                if use_gpu:
                    imgs = imgs.cuda()

                features = model(imgs)[0]

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
            q_paths.extend(paths)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids, g_paths = [], [], [], []
        if flip_eval:
            enumerator = enumerate(zip(galleryloader[0], galleryloader[1]))
        else:
            enumerator = enumerate(galleryloader[0])

        for batch_idx, package in enumerator:
            end = time.time()

            if flip_eval:
                (imgs0, pids, camids, paths), (imgs1, _, _, _) = package
                if use_gpu:
                    imgs0, imgs1 = imgs0.cuda(), imgs1.cuda()
                features = (model(imgs0)[0] + model(imgs1)[0]) / 2.0
                # print(features.size())
            else:
                (imgs, pids, camids, _) = package
                if use_gpu:
                    imgs = imgs.cuda()

                features = model(imgs)[0]

            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
            g_paths.extend(paths)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

        if os.environ.get('save_feat'):
            import scipy.io as io
            io.savemat(
                os.environ.get('save_feat'), {
                    'q': qf.data.numpy(),
                    'g': gf.data.numpy(),
                    'qt': q_pids,
                    'gt': g_pids
                })
            # return

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch_size))

    m, n = qf.size(0), gf.size(0)
    # squared Euclidean distances: ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q . g
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
        torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    if os.environ.get('distmat'):
        import scipy.io as io
        io.savemat(os.environ.get('distmat'), {
            'distmat': distmat,
            'qp': q_paths,
            'gp': g_paths
        })

    print("Computing CMC and mAP")
    cmc, mAP = evaluate_rank(distmat,
                             q_pids,
                             g_pids,
                             q_camids,
                             g_camids,
                             use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0]
Example #9
def test(model,
         queryloader,
         galleryloader,
         pool,
         use_gpu,
         ranks=(1, 5, 10, 20),
         return_distmat=False):
    global mAP
    batch_time = AverageMeter()

    model.eval()

    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids, adj) in enumerate(queryloader):
            if use_gpu:
                imgs, adj = imgs.cuda(), adj.cuda()
            if args.test_sample in ['dense', 'skipdense']:
                b, n, s, c, h, w = imgs.size()
                imgs = imgs.view(b * n, s, c, h, w)
                adj = adj.view(b * n, adj.size(-1), adj.size(-1))
            else:
                n, s, c, h, w = imgs.size()

            end = time.time()
            features = model(imgs, adj)
            batch_time.update(time.time() - end)
            if args.test_sample in ['dense', 'skipdense']:
                features = features.view(n, 1, -1)
                if pool == 'avg':
                    features = torch.mean(features, 0)
                else:
                    features, _ = torch.max(features, 0)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids.numpy())
            q_camids.extend(camids.numpy())
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".
              format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        for batch_idx, (imgs, pids, camids, adj) in enumerate(galleryloader):
            if use_gpu:
                imgs, adj = imgs.cuda(), adj.cuda()
            if args.test_sample in ['dense', 'skipdense']:
                b, n, s, c, h, w = imgs.size()
                imgs = imgs.view(b * n, s, c, h, w)
                adj = adj.view(b * n, adj.size(-1), adj.size(-1))
            else:
                n, s, c, h, w = imgs.size()

            end = time.time()
            features = model(imgs, adj)
            batch_time.update(time.time() - end)
            if args.test_sample in ['dense', 'skipdense']:
                features = features.view(n, 1, -1)
                if pool == 'avg':
                    features = torch.mean(features, 0)
                else:
                    features, _ = torch.max(features, 0)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids.numpy())
            g_camids.extend(camids.numpy())
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".
              format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(
        batch_time.avg, args.test_batch * args.seq_len))

    print('Computing distance matrix with metric={} ...'.format(
        args.dist_metric))
    distmat = metrics.compute_distance_matrix(qf, gf, args.dist_metric)
    distmat = distmat.numpy()

    if args.re_rank:
        print('Applying person re-ranking ...')
        distmat_qq = metrics.compute_distance_matrix(qf, qf, args.dist_metric)
        distmat_gg = metrics.compute_distance_matrix(gf, gf, args.dist_metric)
        distmat = re_ranking(distmat, distmat_qq, distmat_gg)

    print("Computing CMC and mAP")

    cmc, mAP = metrics.evaluate_rank(distmat,
                                     q_pids,
                                     g_pids,
                                     q_camids,
                                     g_camids,
                                     use_metric_mars=True)

    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")

    if return_distmat:
        return distmat
    return cmc[0], mAP
Example #10
    def _evaluate(self,
                  epoch,
                  dataset_name='',
                  queryloader=None,
                  galleryloader=None,
                  dist_metric='euclidean',
                  normalize_feature=False,
                  visrank=False,
                  visrankactiv=False,
                  visrank_topk=10,
                  save_dir='',
                  use_metric_cuhk03=False,
                  ranks=[1, 5, 10, 20],
                  rerank=False,
                  visrankactivthr=False,
                  maskthr=0.7,
                  visdrop=False,
                  visdroptype='random'):
        batch_time = AverageMeter()

        print('Extracting features from query set ...')
        # query features, query activations, query person IDs, query camera IDs and image drop masks
        qf, qa, q_pids, q_camids, qm = [], [], [], [], []
        for _, data in enumerate(queryloader):
            imgs, pids, camids = self._parse_data_for_eval(data)
            if self.use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = self._extract_features(imgs)
            activations = self._extract_activations(imgs)
            dropmask = self._extract_drop_masks(imgs, visdrop, visdroptype)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            qa.append(torch.Tensor(activations))
            qm.append(torch.Tensor(dropmask))
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        qm = torch.cat(qm, 0)
        qa = torch.cat(qa, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set ...')
        # gallery features, gallery activations, gallery person IDs, gallery camera IDs and image drop masks
        gf, ga, g_pids, g_camids, gm = [], [], [], [], []
        end = time.time()
        for _, data in enumerate(galleryloader):
            imgs, pids, camids = self._parse_data_for_eval(data)
            if self.use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = self._extract_features(imgs)
            activations = self._extract_activations(imgs)
            dropmask = self._extract_drop_masks(imgs, visdrop, visdroptype)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            ga.append(torch.Tensor(activations))
            gm.append(torch.Tensor(dropmask))
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        gm = torch.cat(gm, 0)
        ga = torch.cat(ga, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

        print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

        if normalize_feature:
            print('Normalizing features with L2 norm ...')
            qf = F.normalize(qf, p=2, dim=1)
            gf = F.normalize(gf, p=2, dim=1)

        print(
            'Computing distance matrix with metric={} ...'.format(dist_metric))
        distmat = metrics.compute_distance_matrix(qf, gf, dist_metric)
        distmat = distmat.numpy()

        # always show results without re-ranking first
        print('Computing CMC and mAP ...')
        cmc, mAP = metrics.evaluate_rank(distmat,
                                         q_pids,
                                         g_pids,
                                         q_camids,
                                         g_camids,
                                         use_metric_cuhk03=use_metric_cuhk03)

        print('** Results **')
        print('mAP: {:.1%}'.format(mAP))
        print('CMC curve')
        for r in ranks:
            print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))

        if rerank:
            print('Applying person re-ranking ...')
            distmat_qq = metrics.compute_distance_matrix(qf, qf, dist_metric)
            distmat_gg = metrics.compute_distance_matrix(gf, gf, dist_metric)
            distmat = re_ranking(distmat, distmat_qq, distmat_gg)
            print('Computing CMC and mAP ...')
            cmc, mAP = metrics.evaluate_rank(
                distmat,
                q_pids,
                g_pids,
                q_camids,
                g_camids,
                use_metric_cuhk03=use_metric_cuhk03)

            print('** Results with Re-Ranking **')
            print('mAP: {:.1%}'.format(mAP))
            print('CMC curve')
            for r in ranks:
                print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))

        if visrank:
            visualize_ranked_results(
                distmat,
                self.datamanager.return_testdataset_by_name(dataset_name),
                self.datamanager.data_type,
                width=self.datamanager.width,
                height=self.datamanager.height,
                save_dir=osp.join(save_dir, 'visrank_' + dataset_name),
                topk=visrank_topk)
        if visrankactiv:
            visualize_ranked_activation_results(
                distmat,
                qa,
                ga,
                self.datamanager.return_testdataset_by_name(dataset_name),
                self.datamanager.data_type,
                width=self.datamanager.width,
                height=self.datamanager.height,
                save_dir=osp.join(save_dir, 'visrankactiv_' + dataset_name),
                topk=visrank_topk)
        if visrankactivthr:
            visualize_ranked_threshold_activation_results(
                distmat,
                qa,
                ga,
                self.datamanager.return_testdataset_by_name(dataset_name),
                self.datamanager.data_type,
                width=self.datamanager.width,
                height=self.datamanager.height,
                save_dir=osp.join(save_dir, 'visrankactivthr_' + dataset_name),
                topk=visrank_topk,
                threshold=maskthr)
        if visdrop:
            visualize_ranked_mask_activation_results(
                distmat,
                qa,
                ga,
                qm,
                gm,
                self.datamanager.return_testdataset_by_name(dataset_name),
                self.datamanager.data_type,
                width=self.datamanager.width,
                height=self.datamanager.height,
                save_dir=osp.join(
                    save_dir, 'visdrop_{}_{}'.format(visdroptype,
                                                     dataset_name)),
                topk=visrank_topk)

        return cmc[0]
Example #11
    def _evaluate(self,
                  epoch,
                  dataset_name='',
                  queryloader=None,
                  galleryloader=None,
                  dist_metric='euclidean',
                  normalize_feature=False,
                  visrank=False,
                  visrank_topk=20,
                  save_dir='',
                  use_metric_cuhk03=False,
                  ranks=[1, 5, 10, 20],
                  rerank=False,
                  load_pose=False,
                  part_score=False):
        batch_time = AverageMeter()

        self.model.eval()

        print('Extracting features from query set ...')
        # query features, query person IDs and query camera IDs
        qf, q_pids, q_camids = [], [], []
        q_score = []
        for batch_idx, data in enumerate(queryloader):
            if load_pose:
                imgs, pids, camids, pose = self._parse_data_for_eval(data)
            else:
                imgs, pids, camids = self._parse_data_for_eval(data)
            if self.use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            if load_pose:
                if part_score:
                    features, score = self._extract_features(imgs, pose)
                    score = score.data.cpu()
                    q_score.append(score)
                else:
                    features = self._extract_features(imgs, pose)
            else:
                features = self._extract_features(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        if part_score:
            q_score = torch.cat(q_score)
        print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))

        print('Extracting features from gallery set ...')
        # gallery features, gallery person IDs and gallery camera IDs
        gf, g_pids, g_camids = [], [], []
        g_score = []
        end = time.time()
        for batch_idx, data in enumerate(galleryloader):
            if load_pose:
                imgs, pids, camids, pose = self._parse_data_for_eval(data)
            else:
                imgs, pids, camids = self._parse_data_for_eval(data)
            if self.use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            if load_pose:
                if part_score:
                    features, score = self._extract_features(imgs, pose)
                    score = score.data.cpu()
                    g_score.append(score)
                else:
                    features = self._extract_features(imgs, pose)
            else:
                features = self._extract_features(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        if part_score:
            g_score = torch.cat(g_score)
        print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))

        print('Speed: {:.4f} sec/batch'.format(batch_time.avg))

        if normalize_feature:
            print('Normalizing features with L2 norm ...')
            qf = F.normalize(qf, p=2, dim=1)
            gf = F.normalize(gf, p=2, dim=1)

        print(
            'Computing distance matrix with metric={} ...'.format(dist_metric))
        if part_score:
            distmat = metrics.compute_weight_distance_matrix(
                qf, gf, q_score, g_score, dist_metric)
        else:
            distmat = metrics.compute_distance_matrix(qf, gf, dist_metric)
        distmat = distmat.numpy()

        if rerank:
            print('Applying person re-ranking ...')
            distmat_qq = metrics.compute_distance_matrix(qf, qf, dist_metric)
            distmat_gg = metrics.compute_distance_matrix(gf, gf, dist_metric)
            distmat = re_ranking(distmat, distmat_qq, distmat_gg)

        print('Computing CMC and mAP ...')
        cmc, mAP = metrics.evaluate_rank(distmat,
                                         q_pids,
                                         g_pids,
                                         q_camids,
                                         g_camids,
                                         use_metric_cuhk03=use_metric_cuhk03)

        print('** Results **')
        print('mAP: {:.1%}'.format(mAP))
        print('CMC curve')
        for r in ranks:
            print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))

        if visrank:
            visualize_ranked_results(
                distmat,
                self.datamanager.return_testdataset_by_name(dataset_name),
                save_dir=osp.join(save_dir, 'visrank-' + str(epoch + 1),
                                  dataset_name),
                topk=visrank_topk)

        return cmc[0]