Example 1
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            pred_logits = comm.gather(self.pred_logits)
            pred_logits = sum(pred_logits, [])

            labels = comm.gather(self.labels)
            labels = sum(labels, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            pred_logits = self.pred_logits
            labels = self.labels

        pred_logits = torch.cat(pred_logits, dim=0)
        labels = torch.stack(labels)

        # measure accuracy and record loss
        acc1, = accuracy(pred_logits, labels, topk=(1, ))

        self._results = OrderedDict()
        self._results["Acc@1"] = acc1

        self._results["metric"] = acc1

        return copy.deepcopy(self._results)
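The accuracy helper used above is not shown in these examples. Below is a minimal top-k accuracy sketch with the assumed signature accuracy(output, target, topk=(1,)), returning one percentage per requested k; it is an illustration, not the project's actual implementation.

import torch

def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (in percent) of `output` logits against `target` labels (sketch)."""
    maxk = max(topk)
    batch_size = target.size(0)

    # indices of the top-k predictions for every sample, transposed to (maxk, batch)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res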
Example 2
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions

        features = []
        pids = []
        # camids = []
        for prediction in predictions:
            features.append(prediction['feats'])
            pids.append(prediction['pids'])
            # camids.append(prediction['camids'])

        features = torch.cat(features, dim=0)
        pids = torch.cat(pids, dim=0).numpy()

        rerank_dist = compute_jaccard_distance(
            features,
            k1=self.cfg.CLUSTER.JACCARD.K1,
            k2=self.cfg.CLUSTER.JACCARD.K2,
        )
        pseudo_labels = self.cluster.fit_predict(rerank_dist)

        contingency_matrix = metrics.cluster.contingency_matrix(
            pids, pseudo_labels)
        purity = np.sum(np.amax(contingency_matrix,
                                axis=0)) / np.sum(contingency_matrix)
        return purity
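As a standalone sanity check of the purity computation above, here is a tiny example that only assumes numpy and scikit-learn; the pids and pseudo_labels values are made up.

import numpy as np
from sklearn import metrics

# ground-truth identities and pseudo labels from a clusterer (toy values)
pids = np.array([0, 0, 1, 1, 1, 2])
pseudo_labels = np.array([1, 1, 0, 0, 2, 2])

contingency_matrix = metrics.cluster.contingency_matrix(pids, pseudo_labels)
purity = np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)
print(purity)  # 0.833..., i.e. 5 of 6 samples fall into their cluster's majority identity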
Example 3
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features

        features = torch.cat(features, dim=0)
        features = F.normalize(features, p=2, dim=1).numpy()

        self._results = OrderedDict()
        tpr, fpr, accuracy, best_thresholds = evaluate(features, self.labels)

        self._results["Accuracy"] = accuracy.mean() * 100
        self._results["Threshold"] = best_thresholds.mean()
        self._results["metric"] = accuracy.mean() * 100

        buf = gen_plot(fpr, tpr)
        roc_curve = Image.open(buf)

        PathManager.mkdirs(self._output_dir)
        roc_curve.save(
            os.path.join(self._output_dir, self.dataset_name + "_roc.png"))

        return copy.deepcopy(self._results)
Example 4
def _distributed_worker(
        local_rank, main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args
):
    assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
    global_rank = machine_rank * num_gpus_per_machine + local_rank
    try:
        dist.init_process_group(
            backend="NCCL", init_method=dist_url, world_size=world_size, rank=global_rank
        )
    except Exception as e:
        logger = logging.getLogger(__name__)
        logger.error("Process group URL: {}".format(dist_url))
        raise e
    # synchronize is needed here to prevent a possible timeout after calling init_process_group
    # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
    comm.synchronize()

    assert num_gpus_per_machine <= torch.cuda.device_count()
    torch.cuda.set_device(local_rank)

    # Setup the local process group (which contains ranks within the same machine)
    assert comm._LOCAL_PROCESS_GROUP is None
    num_machines = world_size // num_gpus_per_machine
    for i in range(num_machines):
        ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))
        pg = dist.new_group(ranks_on_i)
        if i == machine_rank:
            comm._LOCAL_PROCESS_GROUP = pg

    main_func(*args)
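In detectron2-style code, _distributed_worker is spawned once per local GPU. The launch helper below is a minimal sketch of how that spawning might look; the launch signature here is an assumption, while torch.multiprocessing.spawn is the real API.

import torch.multiprocessing as mp

def launch(main_func, num_gpus_per_machine, num_machines=1, machine_rank=0,
           dist_url="tcp://127.0.0.1:29500", args=()):
    # spawn one _distributed_worker (defined above) per local GPU; mp.spawn passes
    # the local rank as the first positional argument to the worker
    world_size = num_machines * num_gpus_per_machine
    if world_size > 1:
        mp.spawn(
            _distributed_worker,
            nprocs=num_gpus_per_machine,
            args=(main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args),
            daemon=False,
        )
    else:
        main_func(*args)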
Example 5
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions

        features = []
        pids = []
        # camids = []
        for prediction in predictions:
            features.append(prediction['feats'])
            pids.append(prediction['pids'])
            # camids.append(prediction['camids'])

        features = torch.cat(features, dim=0)
        pids = torch.cat(pids, dim=0).numpy()

        rerank_dist = compute_jaccard_distance(
            features,
            k1=self.cfg.CLUSTER.JACCARD.K1,
            k2=self.cfg.CLUSTER.JACCARD.K2,
        )
        pseudo_labels = self.cluster.fit_predict(rerank_dist)

        ARI_score = metrics.adjusted_rand_score(pids, pseudo_labels)

        return ARI_score
Example 6
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process(): return {}

        else:
            predictions = self._predictions

        pred_logits = []
        labels = []
        for prediction in predictions:
            pred_logits.append(prediction['logits'])
            labels.append(prediction['labels'])

        pred_logits = torch.cat(pred_logits, dim=0)
        labels = torch.cat(labels, dim=0)

        # measure accuracy and record loss
        acc1, = accuracy(pred_logits, labels, topk=(1, ))

        self._results = OrderedDict()
        self._results["Acc@1"] = acc1

        self._results["metric"] = acc1

        return copy.deepcopy(self._results)
Example 7
    def after_epoch(self):
        next_epoch = self.trainer.epoch + 1
        is_final = next_epoch == self.trainer.max_epoch
        if is_final or (self._period > 0 and next_epoch % self._period == 0):
            self._do_eval()
        # Evaluation may take a different amount of time across workers.
        # A barrier makes them start the next iteration together.
        comm.synchronize()
Example 8
def init_pretrained_weights(key):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    import os
    import errno
    import gdown

    def _get_torch_home():
        ENV_TORCH_HOME = 'TORCH_HOME'
        ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
        DEFAULT_CACHE_DIR = '~/.cache'
        torch_home = os.path.expanduser(
            os.getenv(
                ENV_TORCH_HOME,
                os.path.join(
                    os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
                )
            )
        )
        return torch_home

    torch_home = _get_torch_home()
    model_dir = os.path.join(torch_home, 'checkpoints')
    try:
        os.makedirs(model_dir)
    except OSError as e:
        if e.errno == errno.EEXIST:
            # Directory already exists, ignore.
            pass
        else:
            # Unexpected OSError, re-raise.
            raise

    filename = model_urls[key].split('/')[-1]

    cached_file = os.path.join(model_dir, filename)

    if not os.path.exists(cached_file):
        if comm.is_main_process():
            gdown.download(model_urls[key], cached_file, quiet=False)

    comm.synchronize()

    logger.info(f"Loading pretrained model from {cached_file}")
    state_dict = torch.load(cached_file, map_location=torch.device('cpu'))

    return state_dict
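A hedged usage sketch of the function above; the key "osnet_x1_0" and the stand-in nn.Sequential model are placeholders, not values taken from this example.

import torch.nn as nn

state_dict = init_pretrained_weights("osnet_x1_0")  # hypothetical key into model_urls
model = nn.Sequential()                             # stand-in for the real backbone
# non-strict loading leaves layers whose names or sizes do not match untouched
incompatible = model.load_state_dict(state_dict, strict=False)
print("missing:", incompatible.missing_keys)
print("unexpected:", incompatible.unexpected_keys)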
Example 9
    def after_step(self):
        next_iter = self.trainer.iter + 1
        is_final = next_iter == self.trainer.max_iter
        if is_final or (self._period > 0 and next_iter % self._period == 0):
            results = self._do_eval()
            for task in results.keys():
                if results[task]['R-1'] > self.best_top1[task]:
                    self.best_top1[task] = results[task]['R-1']
            logger = logging.getLogger(__name__)
            logger.info('----------------------------------------')
            logger.info("Print Best_top1 in csv format:")
            for task, res in self.best_top1.items():
                logger.info(f"{task}: {res:.1%}")
            logger.info('----------------------------------------')
        # Evaluation may take a different amount of time across workers.
        # A barrier makes them start the next iteration together.
        comm.synchronize()
Example 10
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            labels = comm.gather(self.labels)
            labels = sum(labels, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            labels = self.labels

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_labels = np.asarray(labels[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(labels[self._num_query:])

        self._results = OrderedDict()

        if self._num_query == len(features):
            cmc = recall_at_ks(query_features,
                               query_labels,
                               self.recalls,
                               cosine=True)
        else:
            cmc = recall_at_ks(query_features,
                               query_labels,
                               self.recalls,
                               gallery_features,
                               gallery_pids,
                               cosine=True)

        for r in self.recalls:
            self._results['Recall@{}'.format(r)] = cmc[r]
        self._results["metric"] = cmc[self.recalls[0]]

        return copy.deepcopy(self._results)
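recall_at_ks is not shown in these examples, and its exact signature is assumed. The sketch below only illustrates the metric itself: a query counts as a hit at K if any of its K nearest cosine neighbours shares its label.

import torch
import torch.nn.functional as F

def recall_at_ks_sketch(query_feats, query_labels, ks, gallery_feats=None, gallery_labels=None):
    # Recall@K: fraction of queries with at least one same-label item among the top-K neighbours
    self_retrieval = gallery_feats is None
    if self_retrieval:
        gallery_feats, gallery_labels = query_feats, query_labels

    query_labels = torch.as_tensor(query_labels)
    gallery_labels = torch.as_tensor(gallery_labels)

    sim = F.normalize(query_feats, dim=1) @ F.normalize(gallery_feats, dim=1).t()
    if self_retrieval:
        sim.fill_diagonal_(float("-inf"))  # a query must not retrieve itself

    topk = sim.topk(max(ks), dim=1).indices                  # (num_query, max_k)
    hits = gallery_labels[topk] == query_labels.unsqueeze(1)  # same-label neighbour?
    return {k: hits[:, :k].any(dim=1).float().mean().item() for k in ks}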
Example 11
    def _do_eval(self):
        results = self._func()

        if results:
            assert isinstance(
                results, dict
            ), "Eval function must return a dict. Got {} instead.".format(results)

            flattened_results = flatten_results_dict(results)
            for k, v in flattened_results.items():
                try:
                    v = float(v)
                except Exception:
                    raise ValueError(
                        "[EvalHook] eval_function should return a nested dict of float. "
                        "Got '{}: {}' instead.".format(k, v)
                    )
            self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)

        # Evaluation may take a different amount of time across workers.
        # A barrier makes them start the next iteration together.
        comm.synchronize()
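flatten_results_dict is assumed to behave as in detectron2, flattening nested result dicts into "task/metric" keys; below is a minimal recursive sketch under that assumption.

def flatten_results_dict(results):
    # e.g. {"market1501": {"Rank-1": 0.95}} -> {"market1501/Rank-1": 0.95}
    flat = {}
    for k, v in results.items():
        if isinstance(v, dict):
            for sub_k, sub_v in flatten_results_dict(v).items():
                flat[k + "/" + sub_k] = sub_v
        else:
            flat[k] = v
    return flat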
Example 12
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            pred_logits = comm.gather(self.pred_logits)
            pred_logits = sum(pred_logits, [])

            gt_labels = comm.gather(self.gt_labels)
            gt_labels = sum(gt_labels, [])

            if not comm.is_main_process():
                return {}
        else:
            pred_logits = self.pred_logits
            gt_labels = self.gt_labels

        pred_logits = torch.stack(pred_logits, dim=0).numpy()
        gt_labels = torch.stack(gt_labels, dim=0).numpy()

        # Pedestrian attribute metrics
        thres = self.cfg.TEST.THRES
        self._results = self.get_attr_metrics(gt_labels, pred_logits, thres)

        return copy.deepcopy(self._results)
Example 13
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process(): return {}

        else:
            predictions = self._predictions

        total_correct_num = 0
        total_samples = 0
        for prediction in predictions:
            total_correct_num += prediction["num_correct"]
            total_samples += prediction["num_samples"]

        acc1 = total_correct_num / total_samples * 100

        self._results = OrderedDict()
        self._results["Acc@1"] = acc1
        self._results["metric"] = acc1

        return copy.deepcopy(self._results)
Example 14
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            pids = self.pids
            camids = self.camids

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])

        self._results = OrderedDict()

        if self.cfg.TEST.AQE.ENABLED:
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features, gallery_features, qe_time, qe_k, alpha)

        dist = build_dist(query_features, gallery_features, self.cfg.TEST.METRIC)

        if self.cfg.TEST.RERANK.ENABLED:
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA

            if self.cfg.TEST.METRIC == "cosine":
                query_features = F.normalize(query_features, dim=1)
                gallery_features = F.normalize(gallery_features, dim=1)

            rerank_dist = build_dist(query_features, gallery_features, metric="jaccard", k1=k1, k2=k2)
            dist = rerank_dist * (1 - lambda_value) + dist * lambda_value

        cmc, all_AP, all_INP = evaluate_rank(dist, query_pids, gallery_pids, query_camids, gallery_camids)

        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1]
        self._results['mAP'] = mAP
        self._results['mINP'] = mINP
        self._results["metric"] = (mAP + cmc[0]) / 2

        if self.cfg.TEST.ROC_ENABLED:
            scores, labels = evaluate_roc(dist, query_pids, gallery_pids, query_camids, gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)
Example 15
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            pids = self.pids

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])

        if self.cfg.TEST.AQE.ENABLED:
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features,
                                                   gallery_features, qe_time,
                                                   qe_k, alpha)

        if self.cfg.TEST.METRIC == "cosine":
            query_features = F.normalize(query_features, dim=1)
            gallery_features = F.normalize(gallery_features, dim=1)

        dist = build_dist(query_features, gallery_features,
                          self.cfg.TEST.METRIC)

        if self.cfg.TEST.RERANK.ENABLED:
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA

            if self.cfg.TEST.METRIC == "cosine":
                query_features = F.normalize(query_features, dim=1)
                gallery_features = F.normalize(gallery_features, dim=1)

            rerank_dist = build_dist(query_features,
                                     gallery_features,
                                     metric="jaccard",
                                     k1=k1,
                                     k2=k2)
            dist = rerank_dist * (1 - lambda_value) + dist * lambda_value

        if self.cfg.TEST.SAVE_DISTMAT:
            np.save(os.path.join(self.cfg.OUTPUT_DIR, "distmat.npy"), dist)

        results = defaultdict(list)

        topk_indices = partition_arg_topK(dist, K=200, axis=1)
        for i in range(topk_indices.shape[0]):
            # cast to built-in Python types so the dict is JSON-serializable
            results[int(query_pids[i])].extend(gallery_pids[topk_indices[i]].tolist())

        with open(os.path.join(self.cfg.OUTPUT_DIR, "submit.json"), 'w') as f:
            json.dump(results, f)

        return {}
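partition_arg_topK is assumed to return, per row, the indices of the K smallest distances in sorted order. Below is a numpy sketch for the row-wise case used above, built on np.argpartition so that only the selected K entries are fully sorted.

import numpy as np

def partition_arg_topK(matrix, K, axis=1):
    # indices of the K smallest values per row, sorted ascending (sketch, axis=1 only)
    assert axis == 1, "sketch covers only the row-wise case used above"
    rows = np.arange(matrix.shape[0])[:, None]
    part = np.argpartition(matrix, K, axis=1)[:, :K]  # K smallest per row, unordered
    order = np.argsort(matrix[rows, part], axis=1)    # sort just those K values
    return part[rows, order]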
Example 16
def init_pretrained_weights(model, key=''):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    import os
    import errno
    import gdown
    from collections import OrderedDict
    import warnings
    import logging

    logger = logging.getLogger(__name__)

    def _get_torch_home():
        ENV_TORCH_HOME = 'TORCH_HOME'
        ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
        DEFAULT_CACHE_DIR = '~/.cache'
        torch_home = os.path.expanduser(
            os.getenv(
                ENV_TORCH_HOME,
                os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR),
                             'torch')))
        return torch_home

    torch_home = _get_torch_home()
    model_dir = os.path.join(torch_home, 'checkpoints')
    try:
        os.makedirs(model_dir)
    except OSError as e:
        if e.errno == errno.EEXIST:
            # Directory already exists, ignore.
            pass
        else:
            # Unexpected OSError, re-raise.
            raise
    filename = key + '_imagenet.pth'
    cached_file = os.path.join(model_dir, filename)

    if not os.path.exists(cached_file):
        if comm.is_main_process():
            gdown.download(model_urls[key], cached_file, quiet=False)

    comm.synchronize()

    state_dict = torch.load(cached_file, map_location=torch.device('cpu'))
    model_dict = model.state_dict()
    new_state_dict = OrderedDict()
    matched_layers, discarded_layers = [], []

    for k, v in state_dict.items():
        if k.startswith('module.'):
            k = k[7:]  # discard module.

        if k in model_dict and model_dict[k].size() == v.size():
            new_state_dict[k] = v
            matched_layers.append(k)
        else:
            discarded_layers.append(k)

    model_dict.update(new_state_dict)
    model.load_state_dict(model_dict)

    if len(matched_layers) == 0:
        warnings.warn('The pretrained weights from "{}" cannot be loaded, '
                      'please check the key names manually '
                      '(** ignored and continue **)'.format(cached_file))
    else:
        logger.info(
            'Successfully loaded imagenet pretrained weights from "{}"'.format(
                cached_file))
        if len(discarded_layers) > 0:
            logger.info('** The following layers are discarded '
                        'due to unmatched keys or layer size: {}'.format(
                            discarded_layers))
Example 17
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            names = comm.gather(self.names)
            names = sum(names, [])

            if not comm.is_main_process():
                return {}
        else:
            features = self.features
            pids = self.pids
            camids = self.camids
            names = self.names

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])
        query_names = names[:self._num_query]

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])
        gallery_names = names[self._num_query:]

        # save_dicts = {'q_feat': query_features.numpy(),
        #               'q_id': query_pids,
        #               'q_cam': query_camids,
        #               'q_name': query_names,
        #               'g_feat': gallery_features.numpy(),
        #               'g_id': gallery_pids,
        #               'g_cam': gallery_camids,
        #               'g_name': gallery_names}
        # import pickle
        # print('Saving features ...')
        # with open('feats/pcl_filter_flip_aug.pkl', 'wb') as f:
        #     pickle.dump(save_dicts, f)

        self._results = OrderedDict()

        if self.cfg.TEST.AQE.ENABLED:
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features,
                                                   gallery_features, qe_time,
                                                   qe_k, alpha)

        if self.cfg.TEST.METRIC == "cosine":
            query_features = F.normalize(query_features, dim=1)
            gallery_features = F.normalize(gallery_features, dim=1)

        dist = self.cal_dist(self.cfg.TEST.METRIC, query_features,
                             gallery_features)

        if self.cfg.TEST.RERANK.ENABLED:
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA
            q_q_dist = self.cal_dist(self.cfg.TEST.METRIC, query_features,
                                     query_features)
            g_g_dist = self.cal_dist(self.cfg.TEST.METRIC, gallery_features,
                                     gallery_features)
            re_dist = re_ranking(dist, q_q_dist, g_g_dist, k1, k2,
                                 lambda_value)
            query_features = query_features.numpy()
            gallery_features = gallery_features.numpy()
            cmc, all_AP, all_INP = evaluate_rank(re_dist,
                                                 query_features,
                                                 gallery_features,
                                                 query_pids,
                                                 gallery_pids,
                                                 query_camids,
                                                 gallery_camids,
                                                 use_distmat=True)
        else:
            query_features = query_features.numpy()
            gallery_features = gallery_features.numpy()
            cmc, all_AP, all_INP = evaluate_rank(dist,
                                                 query_features,
                                                 gallery_features,
                                                 query_pids,
                                                 gallery_pids,
                                                 query_camids,
                                                 gallery_camids,
                                                 use_distmat=False)
        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1]
        self._results['mAP'] = mAP
        self._results['mINP'] = mINP

        if self.cfg.TEST.ROC_ENABLED:
            scores, labels = evaluate_roc(dist, query_features,
                                          gallery_features, query_pids,
                                          gallery_pids, query_camids,
                                          gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)
Example 18
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            spatial_features = comm.gather(self.spatial_features)
            spatial_features = sum(spatial_features, [])

            scores = comm.gather(self.scores)
            scores = sum(scores, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            spatial_features = self.spatial_features
            scores = self.scores
            pids = self.pids
            camids = self.camids

        features = torch.cat(features, dim=0)
        spatial_features = torch.cat(spatial_features, dim=0).numpy()
        scores = torch.cat(scores, dim=0)

        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])

        if self.cfg.TEST.METRIC == "cosine":
            query_features = F.normalize(query_features, dim=1)
            gallery_features = F.normalize(gallery_features, dim=1)

        dist = 1 - torch.mm(query_features, gallery_features.t()).numpy()
        self._results = OrderedDict()

        query_features = query_features.numpy()
        gallery_features = gallery_features.numpy()
        if self.cfg.TEST.DSR.ENABLED:
            logger.info("Testing with DSR setting")
            dsr_dist = compute_dsr_dist(spatial_features[:self._num_query],
                                        spatial_features[self._num_query:],
                                        dist, scores[:self._num_query])

            max_value = 0
            k = 0
            for i in range(0, 101):
                lamb = 0.01 * i
                dist1 = (1 - lamb) * dist + lamb * dsr_dist
                cmc, all_AP, all_INP = evaluate_rank(dist1, query_pids,
                                                     gallery_pids,
                                                     query_camids,
                                                     gallery_camids)
                if cmc[0] > max_value:
                    k = lamb
                    max_value = cmc[0]
            dist1 = (1 - k) * dist + k * dsr_dist
            cmc, all_AP, all_INP = evaluate_rank(dist1, query_pids,
                                                 gallery_pids, query_camids,
                                                 gallery_camids)
        else:
            cmc, all_AP, all_INP = evaluate_rank(dist, query_pids,
                                                 gallery_pids, query_camids,
                                                 gallery_camids)

        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1] * 100
        self._results['mAP'] = mAP * 100
        self._results['mINP'] = mINP * 100

        return copy.deepcopy(self._results)
Example 19
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}

        else:
            predictions = self._predictions

        features = []
        pids = []
        camids = []
        for prediction in predictions:
            features.append(prediction['feats'])
            pids.append(prediction['pids'])
            camids.append(prediction['camids'])

        features = torch.cat(features, dim=0)
        pids = torch.cat(pids, dim=0).numpy()
        camids = torch.cat(camids, dim=0).numpy()
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = pids[:self._num_query]
        query_camids = camids[:self._num_query]

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = pids[self._num_query:]
        gallery_camids = camids[self._num_query:]

        self._results = OrderedDict()

        if self.cfg.TEST.AQE.ENABLED:
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features,
                                                   gallery_features, qe_time,
                                                   qe_k, alpha)

        dist = build_dist(query_features, gallery_features,
                          self.cfg.TEST.METRIC)

        if self.cfg.TEST.RERANK.ENABLED:
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA

            if self.cfg.TEST.METRIC == "cosine":
                query_features = F.normalize(query_features, dim=1)
                gallery_features = F.normalize(gallery_features, dim=1)

            rerank_dist = build_dist(query_features,
                                     gallery_features,
                                     metric="jaccard",
                                     k1=k1,
                                     k2=k2)
            dist = rerank_dist * (1 - lambda_value) + dist * lambda_value

        cmc, all_AP, all_INP = evaluate_rank(dist, query_pids, gallery_pids,
                                             query_camids, gallery_camids)

        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1] * 100
        self._results['mAP'] = mAP * 100
        self._results['mINP'] = mINP * 100
        self._results["metric"] = (mAP + cmc[0]) / 2 * 100

        if self.cfg.TEST.ROC.ENABLED:
            scores, labels = evaluate_roc(dist, query_pids, gallery_pids,
                                          query_camids, gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)
Example 20
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            img_paths = comm.gather(self.img_paths)
            img_paths = sum(img_paths, [])

            if not comm.is_main_process():
                return {}
        else:
            features = self.features
            pids = self.pids
            camids = self.camids
            img_paths = self.img_paths

        features = torch.cat(features, dim=0)
        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])

        self._results = OrderedDict()

        if self.cfg.TEST.AQE.ENABLED:
            logger.info("Test with AQE setting")
            qe_time = self.cfg.TEST.AQE.QE_TIME
            qe_k = self.cfg.TEST.AQE.QE_K
            alpha = self.cfg.TEST.AQE.ALPHA
            query_features, gallery_features = aqe(query_features, gallery_features, qe_time, qe_k, alpha)

        if self.cfg.TEST.METRIC == "cosine":
            query_features = F.normalize(query_features, dim=1)
            gallery_features = F.normalize(gallery_features, dim=1)

        dist = self.cal_dist(self.cfg.TEST.METRIC, query_features, gallery_features)
        
        if self.cfg.TEST.RERANK.ENABLED:
            logger.info("Test with rerank setting")
            k1 = self.cfg.TEST.RERANK.K1
            k2 = self.cfg.TEST.RERANK.K2
            lambda_value = self.cfg.TEST.RERANK.LAMBDA
            # q_q_dist = self.cal_dist(self.cfg.TEST.METRIC, query_features, query_features)
            # g_g_dist = self.cal_dist(self.cfg.TEST.METRIC, gallery_features, gallery_features)
            # re_dist = re_ranking(dist, q_q_dist, g_g_dist, k1, k2, lambda_value)
            # Luo rerank
            re_dist = re_ranking(query_features, gallery_features, k1, k2, lambda_value)
            print('re_dist', re_dist.shape)
            query_features = query_features.numpy()
            gallery_features = gallery_features.numpy()
            # generate the submission json and return early; the ranking metrics below are skipped
            eval_json(img_paths, self._num_query, re_dist, query_features,
                      gallery_features, self._output_dir, use_distmat=True)
            return {}
            cmc, all_AP, all_INP = evaluate_rank(re_dist, query_features, gallery_features,
                                                 query_pids, gallery_pids, query_camids,
                                                 gallery_camids, use_distmat=True)
        else:
            query_features = query_features.numpy()
            gallery_features = gallery_features.numpy()
            # generate the submission json and return early; the ranking metrics below are skipped
            eval_json(img_paths, self._num_query, dist, query_features,
                      gallery_features, self._output_dir, use_distmat=False)
            return {}
            cmc, all_AP, all_INP = evaluate_rank(dist, query_features, gallery_features,
                                                 query_pids, gallery_pids, query_camids, gallery_camids,
                                                 use_distmat=False)
        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)
        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1]
        self._results['mAP'] = mAP
        self._results['mINP'] = mINP

        if self.cfg.TEST.ROC_ENABLED:
            scores, labels = evaluate_roc(dist, query_features, gallery_features,
                                          query_pids, gallery_pids, query_camids, gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)
Example 21
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return {}
        else:
            predictions = self._predictions

        features = []
        pids = []
        # camids = []
        for prediction in predictions:
            features.append(prediction['feats'])
            pids.append(prediction['pids'])
            # camids.append(prediction['camids'])

        features = torch.cat(features, dim=0)
        pids = torch.cat(pids, dim=0).numpy()

        rerank_dist = compute_jaccard_distance(
            features,
            k1=self.cfg.CLUSTER.JACCARD.K1,
            k2=self.cfg.CLUSTER.JACCARD.K2,
        )
        pseudo_labels = self.cluster.fit_predict(rerank_dist)

        ARI_score = metrics.adjusted_rand_score(pids, pseudo_labels)
        contingency_matrix = metrics.cluster.contingency_matrix(
            pids, pseudo_labels)
        purity = np.sum(np.amax(contingency_matrix,
                                axis=0)) / np.sum(contingency_matrix)
        return ARI_score, purity


# def extract_cnn_feature(model, inputs, return_branches=False):
#     inputs = to_torch(inputs).cuda()
#     if return_branches:
#         outputs, branches_lists = model(inputs, return_branches=True)
#         outputs = outputs.data.cpu()
#         for i in range(len(branches_lists)):
#             branches_lists[i] = branches_lists[i].data.cpu()
#         return outputs, branches_lists
#     else:
#         outputs = model(inputs)
#         outputs = outputs.data.cpu()
#         return outputs

# def extract_features(model, data_loader, print_freq=50, with_mem_idx=False, return_branches=False):
#     model.eval()
#     batch_time = AverageMeter()
#     data_time = AverageMeter()

#     features = OrderedDict()
#     labels = OrderedDict()

#     end = time.time()
#     with torch.no_grad():
#         for i, data in enumerate(data_loader):
#             if with_mem_idx:
#                 imgs, fnames, pids, _, _ = data
#             else:
#                 imgs, fnames, pids, _ = data
#             data_time.update(time.time() - end)

#             if return_branches:
#                 outputs, branches_lists = extract_cnn_feature(model, imgs, return_branches=True)
#                 for i_batch, (fname, output, pid) in enumerate(zip(fnames, outputs, pids)):
#                     features[fname] = {
#                         'feat': output,
#                         'branches_list': [branches_lists[j_branch][i_batch] for j_branch in range(len(branches_lists))]
#                     }
#                     labels[fname] = pid
#             else:
#                 outputs = extract_cnn_feature(model, imgs)
#                 for fname, output, pid in zip(fnames, outputs, pids):
#                     features[fname] = output
#                     labels[fname] = pid

#             batch_time.update(time.time() - end)
#             end = time.time()

#             if (i + 1) % print_freq == 0:
#                 print('Extract Features: [{}/{}]\t'
#                       'Time {:.3f} ({:.3f})\t'
#                       'Data {:.3f} ({:.3f})\t'
#                       .format(i + 1, len(data_loader),
#                               batch_time.val, batch_time.avg,
#                               data_time.val, data_time.avg))

#     return features, labels
Example 22
    def evaluate(self):
        if comm.get_world_size() > 1:
            comm.synchronize()
            features = comm.gather(self.features)
            features = sum(features, [])

            spatial_features = comm.gather(self.spatial_features)
            spatial_features = sum(spatial_features, [])

            scores = comm.gather(self.scores)
            scores = sum(scores, [])

            pids = comm.gather(self.pids)
            pids = sum(pids, [])

            camids = comm.gather(self.camids)
            camids = sum(camids, [])

            # fmt: off
            if not comm.is_main_process(): return {}
            # fmt: on
        else:
            features = self.features
            spatial_features = self.spatial_features
            scores = self.scores
            pids = self.pids
            camids = self.camids

        features = torch.cat(features, dim=0)
        spatial_features = torch.cat(spatial_features, dim=0).numpy()
        scores = torch.cat(scores, dim=0)

        # query feature, person ids and camera ids
        query_features = features[:self._num_query]
        query_pids = np.asarray(pids[:self._num_query])
        query_camids = np.asarray(camids[:self._num_query])

        # gallery features, person ids and camera ids
        gallery_features = features[self._num_query:]
        gallery_pids = np.asarray(pids[self._num_query:])
        gallery_camids = np.asarray(camids[self._num_query:])

        if self.cfg.TEST.METRIC == "cosine":
            query_features = F.normalize(query_features, dim=1)
            gallery_features = F.normalize(gallery_features, dim=1)

        dist = 1 - torch.mm(query_features, gallery_features.t()).numpy()
        self._results = OrderedDict()

        query_features = query_features.numpy()
        gallery_features = gallery_features.numpy()
        if self.cfg.TEST.DSR.ENABLED:
            logger.info("Testing with DSR setting")
            dist = compute_dsr_dist(spatial_features[:self._num_query],
                                    spatial_features[self._num_query:], dist,
                                    scores[:self._num_query])
            cmc, all_AP, all_INP = evaluate_rank(dist,
                                                 query_features,
                                                 gallery_features,
                                                 query_pids,
                                                 gallery_pids,
                                                 query_camids,
                                                 gallery_camids,
                                                 use_distmat=True)
        else:
            cmc, all_AP, all_INP = evaluate_rank(dist,
                                                 query_features,
                                                 gallery_features,
                                                 query_pids,
                                                 gallery_pids,
                                                 query_camids,
                                                 gallery_camids,
                                                 use_distmat=False)
        mAP = np.mean(all_AP)
        mINP = np.mean(all_INP)

        for r in [1, 5, 10]:
            self._results['Rank-{}'.format(r)] = cmc[r - 1]
        self._results['mAP'] = mAP
        self._results['mINP'] = mINP

        if self.cfg.TEST.ROC_ENABLED:
            scores, labels = evaluate_roc(dist, query_features,
                                          gallery_features, query_pids,
                                          gallery_pids, query_camids,
                                          gallery_camids)
            fprs, tprs, thres = metrics.roc_curve(labels, scores)

            for fpr in [1e-4, 1e-3, 1e-2]:
                ind = np.argmin(np.abs(fprs - fpr))
                self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]

        return copy.deepcopy(self._results)