Example #1
    def score(self, X, y):
        """
        Generates the Precision-Recall curve on the specified test data.

        Returns
        -------
        score_ : float
            Average precision, a summary of the plot as a weighted mean of
            precision at each threshold, weighted by the increase in recall from
            the previous threshold.

        """
        # If we don't do this check, then it is possible that OneVsRestClassifier
        # has not correctly been fitted for multi-class targets.
        if not hasattr(self, "target_type_"):
            raise NotFitted.from_estimator(self, "score")

        # Must perform label binarization before calling super
        if self.target_type_ == MULTICLASS:
            # Use label_binarize to create multi-label output for OneVsRestClassifier
            y = label_binarize(y, classes=self._target_labels)

        # Call super to check if fitted and to compute classes_
        # Note that self.score_ computed in super will be overridden below
        super(PrecisionRecallCurve, self).score(X, y)

        # Compute the prediction/threshold scores
        y_scores = self._get_y_scores(X)

        # Handle binary and multiclass cases to create correct data structure
        if self.target_type_ == BINARY:
            self.precision_, self.recall_, _ = sk_precision_recall_curve(
                y, y_scores)
            self.score_ = average_precision_score(y, y_scores)
        else:
            self.precision_, self.recall_, self.score_ = {}, {}, {}

            # Compute PRCurve for all classes
            for i, class_i in enumerate(self.classes_):
                self.precision_[class_i], self.recall_[class_i], _ = (
                    sk_precision_recall_curve(y[:, i], y_scores[:, i])
                )
                self.score_[class_i] = average_precision_score(
                    y[:, i], y_scores[:, i]
                )

            # Compute micro average PR curve
            self.precision_[MICRO], self.recall_[MICRO], _ = (
                sk_precision_recall_curve(y.ravel(), y_scores.ravel())
            )
            self.score_[MICRO] = average_precision_score(
                y, y_scores, average=MICRO
            )

        # Draw the figure
        self.draw()

        # Return a score between 0 and 1
        if self.target_type_ == BINARY:
            return self.score_
        return self.score_[MICRO]
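Taken in isolation this method is not runnable: it relies on module-level names such as sk_precision_recall_curve, average_precision_score, label_binarize, BINARY, MULTICLASS, and MICRO, plus helpers like self._get_y_scores(). A minimal usage sketch for the binary branch, assuming the method belongs to a Yellowbrick-style PrecisionRecallCurve visualizer (the import path and fit/score workflow are assumptions, not confirmed by the snippet):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import PrecisionRecallCurve  # assumed import path

X, y = make_classification(n_samples=500, n_classes=2, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# fit() is assumed to set target_type_ and classes_; score() then
# computes the curve, draws it, and returns the average precision.
viz = PrecisionRecallCurve(LogisticRegression())
viz.fit(X_train, y_train)
ap = viz.score(X_test, y_test)  # binary target: returns self.score_
print(ap)                       # a float in [0, 1]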
Example #2
    def score(self, X, y=None):
        """
        Generates the Precision-Recall curve on the specified test data.

        Returns
        -------
        score_ : float
            Average precision, a summary of the plot as a weighted mean of
            precision at each threshold, weighted by the increase in recall from
            the previous threshold.
        """
        # If we don't do this check, then it is possible that OneVsRestClassifier
        # has not correctly been fitted for multi-class targets.
        if not hasattr(self, "target_type_"):
            raise NotFitted((
                "{} cannot wrap an already fitted estimator"
            ).format(
                self.__class__.__name__
            ))

        # Compute the prediction/threshold scores
        y_scores = self._get_y_scores(X)

        # Handle binary and multiclass cases to create correct data structure
        if self.target_type_ == BINARY:
            self.precision_, self.recall_, _ = sk_precision_recall_curve(y, y_scores)
            self.score_ = average_precision_score(y, y_scores)
        else:
            # Use label_binarize to create multi-label output for OneVsRestClassifier
            Y = label_binarize(y, classes=self.classes_)

            self.precision_, self.recall_, self.score_ = {}, {}, {}

            # Compute PRCurve for all classes
            for i, class_i in enumerate(self.classes_):
                self.precision_[class_i], self.recall_[class_i], _ = (
                    sk_precision_recall_curve(Y[:, i], y_scores[:, i])
                )
                self.score_[class_i] = average_precision_score(Y[:, i], y_scores[:, i])

            # Compute micro average PR curve
            self.precision_[MICRO], self.recall_[MICRO], _ = sk_precision_recall_curve(
                Y.ravel(), y_scores.ravel()
            )
            self.score_[MICRO] = average_precision_score(Y, y_scores, average=MICRO)

        # Draw the figure
        self.draw()

        # Return a score between 0 and 1
        if self.target_type_ == BINARY:
            return self.score_
        return self.score_[MICRO]
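Unlike Example #1, this variant calls label_binarize inside the multiclass branch using self.classes_, so callers pass raw class labels in both the binary and multiclass cases. In the multiclass case, score() returns the micro-averaged precision and keeps the per-class curves in dicts keyed by class label. A hedged sketch of that path, again assuming a Yellowbrick-style visualizer and import path:

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import PrecisionRecallCurve  # assumed import path

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

viz = PrecisionRecallCurve(LogisticRegression(max_iter=1000))
viz.fit(X_train, y_train)
micro_ap = viz.score(X_test, y_test)  # multiclass: returns score_[MICRO]

# Per-class curves and scores live in dicts keyed by class label, plus a
# micro-average entry (MICRO is assumed to be the string "micro").
print(list(viz.score_.keys()))
print(micro_ap)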
Example #3
import numpy as np
from sklearn.metrics import precision_recall_curve as sk_precision_recall_curve


def _sk_precision_recall_curve(y_true, probas_pred, num_classes=1):
    """ Adjusted comparison function that can also handle multiclass inputs """
    if num_classes == 1:
        return sk_precision_recall_curve(y_true, probas_pred)

    precision, recall, thresholds = [], [], []
    for i in range(num_classes):
        # Binarize one-vs-rest: class i becomes the positive class
        y_true_temp = np.zeros_like(y_true)
        y_true_temp[y_true == i] = 1
        res = sk_precision_recall_curve(y_true_temp, probas_pred[:, i])
        precision.append(res[0])
        recall.append(res[1])
        thresholds.append(res[2])
    return precision, recall, thresholds
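A short sketch of how this comparison helper might be called for a three-class problem; the targets and per-class scores below are made up for illustration:

import numpy as np

y_true = np.array([0, 1, 2, 2, 1, 0])
# One column of scores per class, e.g. softmax probabilities
probas_pred = np.array([
    [0.7, 0.2, 0.1],
    [0.1, 0.8, 0.1],
    [0.2, 0.2, 0.6],
    [0.3, 0.3, 0.4],
    [0.2, 0.5, 0.3],
    [0.6, 0.3, 0.1],
])

precision, recall, thresholds = _sk_precision_recall_curve(
    y_true, probas_pred, num_classes=3
)
# Each list holds one array per class: element i traces the
# one-vs-rest precision-recall curve for class i.
print(len(precision), precision[0], recall[0])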