Example #1
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            pred_logits = comm.gather(self.pred_logits)
            pred_logits = sum(pred_logits, [])

            labels = comm.gather(self.labels)
            labels = sum(labels, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            pred_logits = self.pred_logits
            labels = self.labels

        pred_logits = torch.cat(pred_logits, dim=0)
        labels = torch.stack(labels)

        # measure accuracy and record loss
        acc1, = accuracy(pred_logits, labels, topk=(1, ))

        self._results = OrderedDict()
        self._results["Acc@1"] = acc1

        self._results["metric"] = acc1

        return copy.deepcopy(self._results)
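
The examples on this page share a gather-then-flatten idiom: each worker accumulates per-batch results in a Python list, rank 0 gathers them, and `sum(gathered, [])` (or `itertools.chain` in later examples) merges them into one flat list. A minimal sketch of that idiom as a standalone helper, assuming a detectron2-style `comm` module like the one these evaluators import:

import itertools

def gather_and_flatten(per_rank_list, comm):
    """Gather a list from every rank and return the flattened result on rank 0."""
    if comm.get_world_size() == 1:
        return per_rank_list
    comm.synchronize()
    gathered = comm.gather(per_rank_list)   # rank 0: list of per-rank lists; other ranks: []
    if not comm.is_main_process():
        return []
    # sum(gathered, []) is equivalent, but chain avoids quadratic copying
    # as the number of ranks grows.
    return list(itertools.chain.from_iterable(gathered))
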
Example #2
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process(): return {}

        else:
            predictions = self._predictions

        pred_logits = []
        labels = []
        for prediction in predictions:
            pred_logits.append(prediction['logits'])
            labels.append(prediction['labels'])

        pred_logits = torch.cat(pred_logits, dim=0)
        labels = torch.cat(labels, dim=0)

        # measure accuracy and record loss
        acc1, = accuracy(pred_logits, labels, topk=(1, ))

        self._results = OrderedDict()
        self._results["Acc@1"] = acc1

        self._results["metric"] = acc1

        return copy.deepcopy(self._results)
Example #3
    def _write_metrics(self, metrics_dict: dict):
        """
        Args:
            metrics_dict (dict): dict of scalar metrics
        """
        metrics_dict = {
            k: v.detach().cpu().item()
            if isinstance(v, torch.Tensor) else float(v)
            for k, v in metrics_dict.items()
        }
        # gather metrics among all workers for logging
        # This assumes we do DDP-style training, which is currently the only
        # supported method in fastreid.
        all_metrics_dict = comm.gather(metrics_dict)

        if comm.is_main_process():
            if "data_time" in all_metrics_dict[0]:
                # data_time among workers can have high variance. The actual latency
                # caused by data_time is the maximum among workers.
                data_time = np.max(
                    [x.pop("data_time") for x in all_metrics_dict])
                self.storage.put_scalar("data_time", data_time)

            # average the rest metrics
            metrics_dict = {
                k: np.mean([x[k] for x in all_metrics_dict])
                for k in all_metrics_dict[0].keys()
            }
            total_losses_reduced = sum(loss for loss in metrics_dict.values())

            self.storage.put_scalar("total_loss", total_losses_reduced)
            if len(metrics_dict) > 1:
                self.storage.put_scalars(**metrics_dict)
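
A toy, self-contained illustration of the aggregation above: `data_time` is reduced with the per-iteration maximum across workers (the slowest dataloader sets the real latency), while every other scalar is averaged. The worker values below are made up:

import numpy as np

all_metrics_dict = [
    {"data_time": 0.08, "loss_cls": 0.52, "loss_triplet": 0.30},  # worker 0
    {"data_time": 0.31, "loss_cls": 0.48, "loss_triplet": 0.34},  # worker 1
]

data_time = np.max([x.pop("data_time") for x in all_metrics_dict])   # 0.31
metrics_dict = {
    k: np.mean([x[k] for x in all_metrics_dict])
    for k in all_metrics_dict[0].keys()
}                                                                    # loss_cls 0.50, loss_triplet 0.32
total_loss = sum(metrics_dict.values())                              # 0.82
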
Example #4
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions

        features = []
        pids = []
        # camids = []
        for prediction in predictions:
            features.append(prediction['feats'])
            pids.append(prediction['pids'])
            # camids.append(prediction['camids'])

        features = torch.cat(features, dim=0)
        pids = torch.cat(pids, dim=0).numpy()

        rerank_dist = compute_jaccard_distance(
            features,
            k1=self.cfg.CLUSTER.JACCARD.K1,
            k2=self.cfg.CLUSTER.JACCARD.K2,
        )
        pseudo_labels = self.cluster.fit_predict(rerank_dist)

        contingency_matrix = metrics.cluster.contingency_matrix(
            pids, pseudo_labels)
        purity = np.sum(np.amax(contingency_matrix,
                                axis=0)) / np.sum(contingency_matrix)
        return purity
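
A quick, self-contained check of the purity formula above; the person ids and cluster assignments are illustrative only:

import numpy as np
from sklearn import metrics

pids          = np.array([0, 0, 1, 1, 1])   # hypothetical ground-truth person ids
pseudo_labels = np.array([5, 5, 7, 7, 5])   # hypothetical cluster assignments

contingency_matrix = metrics.cluster.contingency_matrix(pids, pseudo_labels)
# each column is one cluster; np.amax(..., axis=0) takes its majority-class count
purity = np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)
print(purity)   # 0.8 -> 4 of the 5 samples sit in their cluster's majority class
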
Example #5
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features

        features = torch.cat(features, dim=0)
        features = F.normalize(features, p=2, dim=1).numpy()

        self._results = OrderedDict()
        tpr, fpr, accuracy, best_thresholds = evaluate(features, self.labels)

        self._results["Accuracy"] = accuracy.mean() * 100
        self._results["Threshold"] = best_thresholds.mean()
        self._results["metric"] = accuracy.mean() * 100

        buf = gen_plot(fpr, tpr)
        roc_curve = Image.open(buf)

        PathManager.mkdirs(self._output_dir)
        roc_curve.save(
            os.path.join(self._output_dir, self.dataset_name + "_roc.png"))

        return copy.deepcopy(self._results)
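
`gen_plot(fpr, tpr)` is assumed to render the ROC curve into an in-memory PNG buffer that `Image.open` can read back. A minimal matplotlib sketch of such a helper (not necessarily the project's exact implementation):

import io

import matplotlib
matplotlib.use("Agg")   # headless backend for evaluation jobs
import matplotlib.pyplot as plt

def gen_plot(fpr, tpr):
    """Draw an ROC curve and return it as a BytesIO buffer holding a PNG."""
    fig = plt.figure()
    plt.plot(fpr, tpr, linewidth=2)
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.title("ROC curve")
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    plt.close(fig)
    return buf
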
Example #6
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions

        features = []
        pids = []
        # camids = []
        for prediction in predictions:
            features.append(prediction['feats'])
            pids.append(prediction['pids'])
            # camids.append(prediction['camids'])

        features = torch.cat(features, dim=0)
        pids = torch.cat(pids, dim=0).numpy()

        rerank_dist = compute_jaccard_distance(
            features,
            k1=self.cfg.CLUSTER.JACCARD.K1,
            k2=self.cfg.CLUSTER.JACCARD.K2,
        )
        pseudo_labels = self.cluster.fit_predict(rerank_dist)

        ARI_score = metrics.adjusted_rand_score(pids, pseudo_labels)

        return ARI_score
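
A two-line sanity check of `adjusted_rand_score` on made-up partitions; the score is invariant to how clusters are numbered and drops below zero when agreement is worse than chance:

from sklearn import metrics

print(metrics.adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0]))   # 1.0: same partition, relabelled
print(metrics.adjusted_rand_score([0, 0, 1, 1], [0, 1, 0, 1]))   # -0.5: worse than chance
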
Example #7
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            labels = comm.gather(self.labels)
            labels = sum(labels, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            labels = self.labels

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_labels = np.asarray(labels[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(labels[self._num_query:])

        self._results = OrderedDict()

        if self._num_query == len(features):
            cmc = recall_at_ks(query_features,
                               query_labels,
                               self.recalls,
                               cosine=True)
        else:
            cmc = recall_at_ks(query_features,
                               query_labels,
                               self.recalls,
                               gallery_features,
                               gallery_pids,
                               cosine=True)

        for r in self.recalls:
            self._results['Recall@{}'.format(r)] = cmc[r]
        self._results["metric"] = cmc[self.recalls[0]]

        return copy.deepcopy(self._results)
Example #8
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            pred_logits = comm.gather(self.pred_logits)
            pred_logits = sum(pred_logits, [])

            gt_labels = comm.gather(self.gt_labels)
            gt_labels = sum(gt_labels, [])

            if not comm.is_main_process():
                return {}
        else:
            pred_logits = self.pred_logits
            gt_labels = self.gt_labels

        pred_logits = torch.stack(pred_logits, dim=0).numpy()
        gt_labels = torch.stack(gt_labels, dim=0).numpy()

        # Pedestrian attribute metrics
        thres = self.cfg.TEST.THRES
        self._results = self.get_attr_metrics(gt_labels, pred_logits, thres)

        return copy.deepcopy(self._results)
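
`get_attr_metrics` is not shown on this page. A minimal sketch of what such a helper could compute, assuming `pred_logits` holds per-attribute probabilities that are binarized at `thres`; the mean per-attribute accuracy below is an illustrative choice, not necessarily the project's full metric set:

from collections import OrderedDict

import numpy as np

def get_attr_metrics(gt_labels: np.ndarray, pred_logits: np.ndarray, thres: float) -> OrderedDict:
    preds = (pred_logits > thres).astype(int)           # (N, num_attrs) binary predictions
    per_attr_acc = (preds == gt_labels).mean(axis=0)    # accuracy of each attribute column
    results = OrderedDict()
    results["Acc"] = float(per_attr_acc.mean() * 100)
    results["metric"] = results["Acc"]
    return results
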
Example #9
    def _write_metrics(self, loss_dict: Dict[str, torch.Tensor],
                       data_time: float):
        """
        Args:
            loss_dict (dict): dict of scalar losses
            data_time (float): time taken by the dataloader iteration
        """
        device = next(iter(loss_dict.values())).device

        # Use a new stream so these ops don't wait for DDP or backward
        with torch.cuda.stream(torch.cuda.Stream() if device.type ==
                               "cuda" else None):
            metrics_dict = {
                k: v.detach().cpu().item()
                for k, v in loss_dict.items()
            }
            metrics_dict["data_time"] = data_time

            # Gather metrics among all workers for logging
            # This assumes we do DDP-style training, which is currently the only
            # supported method in detectron2.
            all_metrics_dict = comm.gather(metrics_dict)

        if comm.is_main_process():
            storage = get_event_storage()

            # data_time among workers can have high variance. The actual latency
            # caused by data_time is the maximum among workers.
            data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
            storage.put_scalar("data_time", data_time)

            # average the rest metrics
            metrics_dict = {
                k: np.mean([x[k] for x in all_metrics_dict])
                for k in all_metrics_dict[0].keys()
            }
            total_losses_reduced = sum(metrics_dict.values())
            if not np.isfinite(total_losses_reduced):
                raise FloatingPointError(
                    f"Loss became infinite or NaN at iteration={self.iter}!\n"
                    f"loss_dict = {metrics_dict}")

            storage.put_scalar("total_loss", total_losses_reduced)
            if len(metrics_dict) > 1:
                storage.put_scalars(**metrics_dict)
Example #10
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process(): return {}

        else:
            predictions = self._predictions

        total_correct_num = 0
        total_samples = 0
        for prediction in predictions:
            total_correct_num += prediction["num_correct"]
            total_samples += prediction["num_samples"]

        acc1 = total_correct_num / total_samples * 100

        self._results = OrderedDict()
        self._results["Acc@1"] = acc1
        self._results["metric"] = acc1

        return copy.deepcopy(self._results)
Example #11
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            pids = self.pids
            camids = self.camids

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])

        self._results = OrderedDict()

        if self.cfg.TEST.AQE.ENABLED:
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features, gallery_features, qe_time, qe_k, alpha)

        dist = build_dist(query_features, gallery_features, self.cfg.TEST.METRIC)

        if self.cfg.TEST.RERANK.ENABLED:
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA

            if self.cfg.TEST.METRIC == "cosine":
                query_features = F.normalize(query_features, dim=1)
                gallery_features = F.normalize(gallery_features, dim=1)

            rerank_dist = build_dist(query_features, gallery_features, metric="jaccard", k1=k1, k2=k2)
            dist = rerank_dist * (1 - lambda_value) + dist * lambda_value

        cmc, all_AP, all_INP = evaluate_rank(dist, query_pids, gallery_pids, query_camids, gallery_camids)

        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1]
        self._results['mAP'] = mAP
        self._results['mINP'] = mINP
        self._results["metric"] = (mAP + cmc[0]) / 2

        if self.cfg.TEST.ROC_ENABLED:
            scores, labels = evaluate_roc(dist, query_pids, gallery_pids, query_camids, gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)
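
A toy illustration of the `TPR@FPR` lookup used above: `metrics.roc_curve` returns the full ROC curve, and the evaluator reports the TPR at the point whose FPR is closest to each target. The match/non-match scores below are randomly generated stand-ins:

import numpy as np
from sklearn import metrics

rng = np.random.RandomState(0)
labels = np.concatenate([np.ones(500), np.zeros(500)])
scores = np.concatenate([rng.normal(1.0, 1.0, 500),     # genuine-pair scores (made up)
                         rng.normal(-1.0, 1.0, 500)])   # impostor-pair scores (made up)

fprs, tprs, thres = metrics.roc_curve(labels, scores)
for fpr in [1e-4, 1e-3, 1e-2]:
    ind = np.argmin(np.abs(fprs - fpr))   # ROC point closest to the target FPR
    print("TPR@FPR={:.0e}: {:.2f}".format(fpr, tprs[ind]))
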
Example #12
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            pids = self.pids

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])

        if self.cfg.TEST.AQE.ENABLED:
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features,
                                                   gallery_features, qe_time,
                                                   qe_k, alpha)

        if self.cfg.TEST.METRIC == "cosine":
            query_features = F.normalize(query_features, dim=1)
            gallery_features = F.normalize(gallery_features, dim=1)

        dist = build_dist(query_features, gallery_features,
                          self.cfg.TEST.METRIC)

        if self.cfg.TEST.RERANK.ENABLED:
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA

            if self.cfg.TEST.METRIC == "cosine":
                query_features = F.normalize(query_features, dim=1)
                gallery_features = F.normalize(gallery_features, dim=1)

            rerank_dist = build_dist(query_features,
                                     gallery_features,
                                     metric="jaccard",
                                     k1=k1,
                                     k2=k2)
            dist = rerank_dist * (1 - lambda_value) + dist * lambda_value

        if self.cfg.TEST.SAVE_DISTMAT:
            np.save(os.path.join(self.cfg.OUTPUT_DIR, "distmat.npy"), dist)

        results = defaultdict(list)

        topk_indices = partition_arg_topK(dist, K=200, axis=1)
        for i in range(topk_indices.shape[0]):
            results[query_pids[i]].extend(gallery_pids[topk_indices[i]])

        with open(os.path.join(self.cfg.OUTPUT_DIR, "submit.json"), 'w') as f:
            json.dump(results, f)

        return {}
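
`partition_arg_topK` is assumed to return, for each query row of the distance matrix, the indices of the `K` smallest (closest) gallery entries. A sketch built on `np.argpartition` under that assumption:

import numpy as np

def partition_arg_topK(matrix: np.ndarray, K: int, axis: int = 1) -> np.ndarray:
    """Indices of the K smallest values along `axis`, sorted ascending by value."""
    part = np.argpartition(matrix, K, axis=axis)
    top_k = np.take(part, range(K), axis=axis)                   # K smallest, unordered
    top_k_values = np.take_along_axis(matrix, top_k, axis=axis)
    order = np.argsort(top_k_values, axis=axis)
    return np.take_along_axis(top_k, order, axis=axis)
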
Example #13
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            names = comm.gather(self.names)
            names = sum(names, [])

            if not comm.is_main_process():
                return {}
        else:
            features = self.features
            pids = self.pids
            camids = self.camids
            names = self.names

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])
        query_names = names[:self._num_query]

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])
        gallery_names = names[self._num_query:]

        # save_dicts = {'q_feat': query_features.numpy(),
        #               'q_id': query_pids,
        #               'q_cam': query_camids,
        #               'q_name': query_names,
        #               'g_feat': gallery_features.numpy(),
        #               'g_id': gallery_pids,
        #               'g_cam': gallery_camids,
        #               'g_name': gallery_names}
        # import pickle
        # print('Saving features ...')
        # with open('feats/pcl_filter_flip_aug.pkl', 'wb') as f:
        #     pickle.dump(save_dicts, f)

        self._results = OrderedDict()

        if self.cfg.TEST.AQE.ENABLED:
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features,
                                                   gallery_features, qe_time,
                                                   qe_k, alpha)

        if self.cfg.TEST.METRIC == "cosine":
            query_features = F.normalize(query_features, dim=1)
            gallery_features = F.normalize(gallery_features, dim=1)

        dist = self.cal_dist(self.cfg.TEST.METRIC, query_features,
                             gallery_features)

        if self.cfg.TEST.RERANK.ENABLED:
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA
            q_q_dist = self.cal_dist(self.cfg.TEST.METRIC, query_features,
                                     query_features)
            g_g_dist = self.cal_dist(self.cfg.TEST.METRIC, gallery_features,
                                     gallery_features)
            re_dist = re_ranking(dist, q_q_dist, g_g_dist, k1, k2,
                                 lambda_value)
            query_features = query_features.numpy()
            gallery_features = gallery_features.numpy()
            cmc, all_AP, all_INP = evaluate_rank(re_dist,
                                                 query_features,
                                                 gallery_features,
                                                 query_pids,
                                                 gallery_pids,
                                                 query_camids,
                                                 gallery_camids,
                                                 use_distmat=True)
        else:
            query_features = query_features.numpy()
            gallery_features = gallery_features.numpy()
            cmc, all_AP, all_INP = evaluate_rank(dist,
                                                 query_features,
                                                 gallery_features,
                                                 query_pids,
                                                 gallery_pids,
                                                 query_camids,
                                                 gallery_camids,
                                                 use_distmat=False)
        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1]
        self._results['mAP'] = mAP
        self._results['mINP'] = mINP

        if self.cfg.TEST.ROC_ENABLED:
            scores, labels = evaluate_roc(dist, query_features,
                                          gallery_features, query_pids,
                                          gallery_pids, query_camids,
                                          gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)
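
Several of these evaluators call `aqe(query_features, gallery_features, qe_time, qe_k, alpha)`. A minimal sketch of alpha query expansion in that spirit: every feature is replaced by a weighted average of its top-k neighbours, with weights equal to cosine similarity raised to `alpha`, repeated `qe_time` times. This follows the common formulation and is not necessarily the exact implementation behind `aqe`:

import torch
import torch.nn.functional as F

def aqe(query_feats, gallery_feats, qe_time=1, qe_k=10, alpha=3.0):
    feats = torch.cat([query_feats, gallery_feats], dim=0)
    num_query = query_feats.shape[0]
    for _ in range(qe_time):
        normed = F.normalize(feats, p=2, dim=1)
        sims = normed @ normed.t()                          # pairwise cosine similarities
        topk_sims, topk_idx = sims.topk(qe_k + 1, dim=1)    # +1: each row also matches itself
        weights = topk_sims.clamp(min=0).pow(alpha)
        weights = weights / weights.sum(dim=1, keepdim=True)
        feats = (weights.unsqueeze(-1) * feats[topk_idx]).sum(dim=1)
    return feats[:num_query], feats[num_query:]
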
Example #14
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            spatial_features = comm.gather(self.spatial_features)
            spatial_features = sum(spatial_features, [])

            scores = comm.gather(self.scores)
            scores = sum(scores, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            spatial_features = self.spatial_features
            scores = self.scores
            pids = self.pids
            camids = self.camids

        features = torch.cat(features, dim=0)
        spatial_features = torch.cat(spatial_features, dim=0).numpy()
        scores = torch.cat(scores, dim=0)

        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])

        if self.cfg.TEST.METRIC == "cosine":
            query_features = F.normalize(query_features, dim=1)
            gallery_features = F.normalize(gallery_features, dim=1)

        dist = 1 - torch.mm(query_features, gallery_features.t()).numpy()
        self._results = OrderedDict()

        query_features = query_features.numpy()
        gallery_features = gallery_features.numpy()
        if self.cfg.TEST.DSR.ENABLED:
            logger.info("Testing with DSR setting")
            dsr_dist = compute_dsr_dist(spatial_features[:self._num_query],
                                        spatial_features[self._num_query:],
                                        dist, scores[:self._num_query])

            max_value = 0
            k = 0
            for i in range(0, 101):
                lamb = 0.01 * i
                dist1 = (1 - lamb) * dist + lamb * dsr_dist
                cmc, all_AP, all_INP = evaluate_rank(dist1, query_pids,
                                                     gallery_pids,
                                                     query_camids,
                                                     gallery_camids)
                if (cmc[0] > max_value):
                    k = lamb
                    max_value = cmc[0]
            dist1 = (1 - k) * dist + k * dsr_dist
            cmc, all_AP, all_INP = evaluate_rank(dist1, query_pids,
                                                 gallery_pids, query_camids,
                                                 gallery_camids)
        else:
            cmc, all_AP, all_INP = evaluate_rank(dist, query_pids,
                                                 gallery_pids, query_camids,
                                                 gallery_camids)

        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1] * 100
        self._results['mAP'] = mAP * 100
        self._results['mINP'] = mINP * 100

        return copy.deepcopy(self._results)
Example #15
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}

        else:
            predictions = self._predictions

        features = []
        pids = []
        camids = []
        for prediction in predictions:
            features.append(prediction['feats'])
            pids.append(prediction['pids'])
            camids.append(prediction['camids'])

        features = torch.cat(features, dim=0)
        pids = torch.cat(pids, dim=0).numpy()
        camids = torch.cat(camids, dim=0).numpy()
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = pids[:self._num_query]
        query_camids = camids[:self._num_query]

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = pids[self._num_query:]
        gallery_camids = camids[self._num_query:]

        self._results = OrderedDict()

        if self.cfg.TEST.AQE.ENABLED:
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features,
                                                   gallery_features, qe_time,
                                                   qe_k, alpha)

        dist = build_dist(query_features, gallery_features,
                          self.cfg.TEST.METRIC)

        if self.cfg.TEST.RERANK.ENABLED:
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA

            if self.cfg.TEST.METRIC == "cosine":
                query_features = F.normalize(query_features, dim=1)
                gallery_features = F.normalize(gallery_features, dim=1)

            rerank_dist = build_dist(query_features,
                                     gallery_features,
                                     metric="jaccard",
                                     k1=k1,
                                     k2=k2)
            dist = rerank_dist * (1 - lambda_value) + dist * lambda_value

        cmc, all_AP, all_INP = evaluate_rank(dist, query_pids, gallery_pids,
                                             query_camids, gallery_camids)

        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1] * 100
        self._results['mAP'] = mAP * 100
        self._results['mINP'] = mINP * 100
        self._results["metric"] = (mAP + cmc[0]) / 2 * 100

        if self.cfg.TEST.ROC.ENABLED:
            scores, labels = evaluate_roc(dist, query_pids, gallery_pids,
                                          query_camids, gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)
Example #16
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions

        features = []
        pids = []
        # camids = []
        for prediction in predictions:
            features.append(prediction['feats'])
            pids.append(prediction['pids'])
            # camids.append(prediction['camids'])

        features = torch.cat(features, dim=0)
        pids = torch.cat(pids, dim=0).numpy()

        rerank_dist = compute_jaccard_distance(
            features,
            k1=self.cfg.CLUSTER.JACCARD.K1,
            k2=self.cfg.CLUSTER.JACCARD.K2,
        )
        pseudo_labels = self.cluster.fit_predict(rerank_dist)

        ARI_score = metrics.adjusted_rand_score(pids, pseudo_labels)
        contingency_matrix = metrics.cluster.contingency_matrix(
            pids, pseudo_labels)
        purity = np.sum(np.amax(contingency_matrix,
                                axis=0)) / np.sum(contingency_matrix)
        return ARI_score, purity


# def extract_cnn_feature(model, inputs, return_branches=False):
#     inputs = to_torch(inputs).cuda()
#     if return_branches:
#         outputs, branches_lists = model(inputs, return_branches=True)
#         outputs = outputs.data.cpu()
#         for i in range(len(branches_lists)):
#             branches_lists[i] = branches_lists[i].data.cpu()
#         return outputs, branches_lists
#     else:
#         outputs = model(inputs)
#         outputs = outputs.data.cpu()
#         return outputs

# def extract_features(model, data_loader, print_freq=50, with_mem_idx=False, return_branches=False):
#     model.eval()
#     batch_time = AverageMeter()
#     data_time = AverageMeter()

#     features = OrderedDict()
#     labels = OrderedDict()

#     end = time.time()
#     with torch.no_grad():
#         for i, data in enumerate(data_loader):
#             if with_mem_idx:
#                 imgs, fnames, pids, _, _ = data
#             else:
#                 imgs, fnames, pids, _ = data
#             data_time.update(time.time() - end)

#             if return_branches:
#                 outputs, branches_lists = extract_cnn_feature(model, imgs, return_branches=True)
#                 for i_batch, (fname, output, pid) in enumerate(zip(fnames, outputs, pids)):
#                     features[fname] = {
#                         'feat': output,
#                         'branches_list': [branches_lists[j_branch][i_batch] for j_branch in range(len(branches_lists))]
#                     }
#                     labels[fname] = pid
#             else:
#                 outputs = extract_cnn_feature(model, imgs)
#                 for fname, output, pid in zip(fnames, outputs, pids):
#                     features[fname] = output
#                     labels[fname] = pid

#             batch_time.update(time.time() - end)
#             end = time.time()

#             if (i + 1) % print_freq == 0:
#                 print('Extract Features: [{}/{}]\t'
#                       'Time {:.3f} ({:.3f})\t'
#                       'Data {:.3f} ({:.3f})\t'
#                       .format(i + 1, len(data_loader),
#                               batch_time.val, batch_time.avg,
#                               data_time.val, data_time.avg))

#     return features, labels
Example #17
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            img_paths = comm.gather(self.img_paths)
            img_paths = sum(img_paths, [])

            if not comm.is_main_process():
                return {}
        else:
            features = self.features
            pids = self.pids
            camids = self.camids
            img_paths = self.img_paths

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])

        self._results = OrderedDict()

        if self.cfg.TEST.AQE.ENABLED:
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features, gallery_features, qe_time, qe_k, alpha)

        if self.cfg.TEST.METRIC == "cosine":
            query_features = F.normalize(query_features, dim=1)
            gallery_features = F.normalize(gallery_features, dim=1)

        dist = self.cal_dist(self.cfg.TEST.METRIC, query_features, gallery_features)
        
        if self.cfg.TEST.RERANK.ENABLED:
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA
            # q_q_dist = self.cal_dist(self.cfg.TEST.METRIC, query_features, query_features)
            # g_g_dist = self.cal_dist(self.cfg.TEST.METRIC, gallery_features, gallery_features)
            # re_dist = re_ranking(dist, q_q_dist, g_g_dist, k1, k2, lambda_value)
            # Luo rerank
            re_dist = re_ranking(query_features, gallery_features, k1, k2, lambda_value)
            print('re_dist', re_dist.shape)
            query_features = query_features.numpy()
            gallery_features = gallery_features.numpy()
            # gen json
            eval_json(img_paths, self._num_query, re_dist, query_features,
                      gallery_features, self._output_dir, use_distmat=True)
            return
            #
            cmc, all_AP, all_INP = evaluate_rank(re_dist, query_features, gallery_features,
                                                 query_pids, gallery_pids, query_camids,
                                                 gallery_camids, use_distmat=True)
        else:
            query_features = query_features.numpy()
            gallery_features = gallery_features.numpy()
            # gen json
            eval_json(img_paths, self._num_query, dist, query_features,
                      gallery_features, self._output_dir, use_distmat=False)
            return
            # 
            cmc, all_AP, all_INP = evaluate_rank(dist, query_features, gallery_features,
                                                 query_pids, gallery_pids, query_camids, gallery_camids,
                                                 use_distmat=False)
        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1]
        self._results['mAP'] = mAP
        self._results['mINP'] = mINP

        if self.cfg.TEST.ROC_ENABLED:
            scores, labels = evaluate_roc(dist, query_features, gallery_features,
                                          query_pids, gallery_pids, query_camids, gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)
Example #18
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            spatial_features = comm.gather(self.spatial_features)
            spatial_features = sum(spatial_features, [])

            scores = comm.gather(self.scores)
            scores = sum(scores, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            spatial_features = self.spatial_features
            scores = self.scores
            pids = self.pids
            camids = self.camids

        features = torch.cat(features, dim=0)
        spatial_features = torch.cat(spatial_features, dim=0).numpy()
        scores = torch.cat(scores, dim=0)

        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])

        if self.cfg.TEST.METRIC == "cosine":
            query_features = F.normalize(query_features, dim=1)
            gallery_features = F.normalize(gallery_features, dim=1)

        dist = 1 - torch.mm(query_features, gallery_features.t()).numpy()
        self._results = OrderedDict()

        query_features = query_features.numpy()
        gallery_features = gallery_features.numpy()
        if self.cfg.TEST.DSR.ENABLED:
            logger.info("Testing with DSR setting")
            dist = compute_dsr_dist(spatial_features[:self._num_query],
                                    spatial_features[self._num_query:], dist,
                                    scores[:self._num_query])
            cmc, all_AP, all_INP = evaluate_rank(dist,
                                                 query_features,
                                                 gallery_features,
                                                 query_pids,
                                                 gallery_pids,
                                                 query_camids,
                                                 gallery_camids,
                                                 use_distmat=True)
        else:
            cmc, all_AP, all_INP = evaluate_rank(dist,
                                                 query_features,
                                                 gallery_features,
                                                 query_pids,
                                                 gallery_pids,
                                                 query_camids,
                                                 gallery_camids,
                                                 use_distmat=False)
        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)

        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1]
        self._results['mAP'] = mAP
        self._results['mINP'] = mINP

        if self.cfg.TEST.ROC_ENABLED:
            scores, labels = evaluate_roc(dist, query_features,
                                          gallery_features, query_pids,
                                          gallery_pids, query_camids,
                                          gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)