Example #1
    def value(self):
        """
        Value of the meter globally synced. Mean AP and the AP for each class
        are returned.
        """
        _, distributed_rank = get_machine_local_and_dist_rank()
        logging.info(
            f"Rank: {distributed_rank} Mean AP meter: "
            f"scores: {self._scores.shape}, target: {self._targets.shape}"
        )
        ap_matrix = torch.ones(self.num_classes, dtype=torch.float32) * -1
        # targets matrix has entries in {0, 1, -1}
        # unknown matrix has entries in {0, 1}, where 1 marks an unknown label
        unknown_matrix = torch.eq(self._targets, -1.0).float().detach().numpy()
        for cls_num in range(self.num_classes):
            # compute AP only for classes that have at least one positive example
            num_pos = len(torch.where(self._targets[:, cls_num] == 1)[0])
            if num_pos == 0:
                continue
            P, R, score, ap = get_precision_recall(
                self._targets[:, cls_num].detach().numpy(),
                self._scores[:, cls_num].detach().numpy(),
                # np.float was removed in NumPy 1.24; use np.float64 instead
                (unknown_matrix[:, cls_num] == 0).astype(np.float64),
            )
            ap_matrix[cls_num] = ap[0]
        nonzero_indices = torch.nonzero(ap_matrix != -1)
        if nonzero_indices.shape[0] < self.num_classes:
            logging.info(
                f"{nonzero_indices.shape[0]} out of {self.num_classes} classes "
                "have meaningful average precision"
            )
        mean_ap = ap_matrix[nonzero_indices].mean().item()
        return {"mAP": mean_ap, "AP": ap_matrix}
Example #2
    def test(self, features, targets, sample_num, low_shot_kvalue):
        """
        Test the SVM for the input test features and targets for the given:
            low-shot k-value, sample number

        We compute the meanAP across all classes for a given cost value.
        We get the output matrix of shape (1, #costs) for the given sample_num and
        k-value and save the matrix. We use this information to aggregate
        later.
        """
        logging.info("Testing SVM")
        # normalize the features: N x 9216 (example shape)
        if self.normalize:
            features = self._normalize_features(features)

        sample_ap_matrix = np.zeros((1, len(self.costs_list)))
        suffix = f"sample{sample_num}_k{low_shot_kvalue}"
        for cost_idx, cost in enumerate(self.costs_list):
            local_cost_ap = np.zeros((self.num_classes, 1))
            for cls_num in self.cls_list:
                logging.info(
                    f"Test sample/k_value/cost/cls: "
                    f"{sample_num}/{low_shot_kvalue}/{cost}/{cls_num}"
                )
                model_file = self._get_svm_low_shot_model_filename(
                    cls_num, cost, suffix
                )
                model = load_file(model_file)
                prediction = model.decision_function(features)
                eval_preds, eval_cls_labels = self._get_cls_feats_labels(
                    cls_num, prediction, targets
                )
                P, R, score, ap = get_precision_recall(eval_cls_labels, eval_preds)
                local_cost_ap[cls_num][0] = ap
            mean_cost_ap = np.mean(local_cost_ap, axis=0)
            sample_ap_matrix[0][cost_idx] = mean_cost_ap
        out_k_sample_file = (
            f"{self.output_dir}/test_ap_sample{sample_num}_k{low_shot_kvalue}.npy"
        )
        save_data = sample_ap_matrix.reshape((1, -1))
        save_file(save_data, out_k_sample_file)
        logging.info(
            f"Saved sample test k_idx AP: {out_k_sample_file} {save_data.shape}"
        )
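
The saved (1, #costs) matrices are meant to be aggregated afterwards. A hedged sketch of one plausible aggregation is below: it averages the per-cost AP across samples for each k-value and picks the best cost index. The directory, sample ids, and k-values are illustrative assumptions, and it assumes save_file wrote standard .npy files that np.load can read.

    # Hypothetical aggregation of the saved test AP matrices; output_dir,
    # sample_nums, and k_values below are assumptions, not values from above.
    import numpy as np

    output_dir = "."            # assumed to match self.output_dir above
    sample_nums = [1, 2, 3]     # assumed sample ids
    k_values = [1, 2, 4, 8]     # assumed low-shot k-values

    for k in k_values:
        ap_rows = []
        for s in sample_nums:
            # mirrors the out_k_sample_file naming pattern above
            path = f"{output_dir}/test_ap_sample{s}_k{k}.npy"
            ap_rows.append(np.load(path).reshape(-1))  # (num_costs,) per sample
        mean_ap_per_cost = np.stack(ap_rows).mean(axis=0)
        best_idx = int(mean_ap_per_cost.argmax())
        print(f"k={k}: best cost index {best_idx}, "
              f"mAP {mean_ap_per_cost[best_idx]:.4f}")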
Example #3
    def test(self, features, targets):
        """
        Test the trained SVM models on the test features and targets values.
        We use the cost per class that gives the maximum cross validation AP on
        the training and load the correspond trained SVM model for the cost value
        and the class.

        Log the test ap to stdout and also save the AP in a file.
        """
        logging.info("Testing SVM")
        # normalize the features: N x 9216 (example shape)
        if self.normalize:
            features = self._normalize_features(features)
        num_classes = targets.shape[1]
        logging.info("Num test classes: {}".format(num_classes))
        # get the chosen cost that maximizes the cross-validation AP per class
        costs_list = self.get_best_cost_value()

        ap_matrix = np.zeros((num_classes, 1))
        for cls_num in range(num_classes):
            cost = costs_list[cls_num]
            logging.info(f"Testing model for cls: {cls_num} cost: {cost}")
            model_file, _ = self._get_svm_model_filename(cls_num, cost)
            model = load_file(model_file)
            prediction = model.decision_function(features)
            cls_labels = targets[:, cls_num]
            # meaning of labels in the original VOC/COCO target files:
            # label 0 = not present, label 1 = present; entries equal to -1
            # are excluded below, and label 0 is remapped to -1 so the SVM
            # test targets are in {-1, 1}.
            evaluate_data_inds = targets[:, cls_num] != -1
            eval_preds = prediction[evaluate_data_inds]
            eval_cls_labels = cls_labels[evaluate_data_inds]
            eval_cls_labels[eval_cls_labels == 0] = -1
            P, R, score, ap = get_precision_recall(eval_cls_labels, eval_preds)
            ap_matrix[cls_num][0] = ap
        logging.info(f"Mean test AP: {np.mean(ap_matrix, axis=0)}")
        test_ap_filepath = f"{self.output_dir}/test_ap.npy"
        save_file(np.array(ap_matrix), test_ap_filepath)
        logging.info(f"saved test AP to file: {test_ap_filepath}")