import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_almost_equal
from sklearn.utils.sparsefuncs import count_nonzero
# NOTE: assert_raises lived in sklearn.utils.testing in the sklearn versions
# this test targets; adjust the import path for newer releases.
from sklearn.utils.testing import assert_raises


def test_count_nonzero():
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    X_nonzero = X != 0
    sample_weight = [.5, .2, .3, .1, .1]
    X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]

    for axis in [0, 1, -1, -2, None]:
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
                                  X_nonzero.sum(axis=axis))
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
                                                sample_weight=sample_weight),
                                  X_nonzero_weighted.sum(axis=axis))

    assert_raises(TypeError, count_nonzero, X_csc)
    assert_raises(ValueError, count_nonzero, X_csr, axis=2)

    assert (count_nonzero(X_csr, axis=0).dtype ==
            count_nonzero(X_csr, axis=1).dtype)
    assert (count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype ==
            count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype)

    # Check dtypes with large sparse matrices too
    X_csr.indices = X_csr.indices.astype(np.int64)
    X_csr.indptr = X_csr.indptr.astype(np.int64)
    assert (count_nonzero(X_csr, axis=0).dtype ==
            count_nonzero(X_csr, axis=1).dtype)
    assert (count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype ==
            count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype)
def multilabel_confusion_matrix(y_pred, y_true, sample_weight=None,
                                samplewise=False):
    r"""Compute a confusion matrix for each class or sample in the output.

    If `sample_weight` is provided, it must contain one weight per sample,
    i.e. have length ``y_pred.shape[0]``.

    This function is inspired by issue #11179 on the scikit-learn GitHub;
    see https://github.com/scikit-learn/scikit-learn/pull/11179 and the
    related pull request for a detailed overview.

    Parameters
    ----------
    y_pred : numpy.ndarray
        Predicted values.
    y_true : numpy.ndarray
        Ground truth values.
    sample_weight : numpy.ndarray, optional
        Sample weights for each example in the output.
    samplewise : bool, optional
        Whether to compute the confusion matrix samplewise or taskwise.

    Returns
    -------
    A confusion matrix of size (N, 2, 2), where N is either the number of
    labels or the total number of samples.
    """
    y_true = ss.csr_matrix(y_true)
    y_pred = ss.csr_matrix(y_pred)
    sum_axis = 1 if samplewise else 0

    # calculate weighted counts
    true_and_pred = y_true.multiply(y_pred)
    tp_sum = sparsefuncs.count_nonzero(true_and_pred, axis=sum_axis,
                                       sample_weight=sample_weight)
    pred_sum = sparsefuncs.count_nonzero(y_pred, axis=sum_axis,
                                         sample_weight=sample_weight)
    true_sum = sparsefuncs.count_nonzero(y_true, axis=sum_axis,
                                         sample_weight=sample_weight)

    fp = pred_sum - tp_sum
    fn = true_sum - tp_sum
    tp = tp_sum

    if sample_weight is not None and samplewise:
        sample_weight = np.array(sample_weight)
        tp = np.array(tp)
        fp = np.array(fp)
        fn = np.array(fn)
        tn = sample_weight * y_true.shape[1] - tp - fp - fn
    elif sample_weight is not None:
        tn = sum(sample_weight) - tp - fp - fn
    elif samplewise:
        tn = y_true.shape[1] - tp - fp - fn
    else:
        tn = y_true.shape[0] - tp - fp - fn

    # Each 2x2 block is laid out as [[tn, fp], [fn, tp]]
    return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
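# --- Usage sketch for the variant above (not part of the original source). ---
# Assumes the module aliases the function relies on: `ss` = scipy.sparse and
# `sparsefuncs` = sklearn.utils.sparsefuncs. Note that this variant takes
# y_pred *before* y_true, unlike sklearn's own signature.
import numpy as np
import scipy.sparse as ss
from sklearn.utils import sparsefuncs

y_true = np.array([[1, 0, 1],
                   [0, 1, 0]])
y_pred = np.array([[1, 0, 0],
                   [0, 1, 1]])
mcm = multilabel_confusion_matrix(y_pred, y_true)  # predictions first here
print(mcm)
# Expected, one [[tn, fp], [fn, tp]] block per label:
# [[[1 0]
#   [0 1]]
#  [[1 0]
#   [0 1]]
#  [[0 1]
#   [1 0]]]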
def f1_per_sample(y_true, y_pred):
    # Work on CSR matrices so count_nonzero can be applied row-wise.
    if isdense(y_true) or isdense(y_pred):
        y_true = sp.csr_matrix(y_true)
        y_pred = sp.csr_matrix(y_pred)
    sum_axis = 1

    true_and_pred = y_true.multiply(y_pred)
    tp_sum = count_nonzero(true_and_pred, axis=sum_axis)
    pred_sum = count_nonzero(y_pred, axis=sum_axis)
    true_sum = count_nonzero(y_true, axis=sum_axis)

    with np.errstate(divide='ignore', invalid='ignore'):
        precision = _prf_divide(tp_sum, pred_sum)
        recall = _prf_divide(tp_sum, true_sum)
        # F1 is the beta=1 case of F-beta: 2 * P * R / (P + R).
        f_score = (2 * precision * recall /
                   (precision + recall))
        f_score[tp_sum == 0] = 0.0
    return f_score
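# --- Usage sketch (not part of the original source). ---
# Assumes the helpers f1_per_sample leans on are defined in the same module:
# `isdense`, `count_nonzero`, and a two-argument `_prf_divide` that returns
# 0 on zero division.
import numpy as np
import scipy.sparse as sp

y_true = sp.csr_matrix(np.array([[1, 0, 1],
                                 [0, 1, 0]]))
y_pred = sp.csr_matrix(np.array([[1, 0, 0],
                                 [0, 1, 1]]))
# Sample 1: P = 1/1, R = 1/2 -> F1 = 2/3; sample 2: P = 1/2, R = 1/1 -> F1 = 2/3.
print(f1_per_sample(y_true, y_pred))  # expected approx. [0.667, 0.667]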
def test_count_nonzero():
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    X_nonzero = X != 0
    sample_weight = [.5, .2, .3, .1, .1]
    X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]

    for axis in [0, 1, -1, -2, None]:
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
                                  X_nonzero.sum(axis=axis))
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
                                                sample_weight=sample_weight),
                                  X_nonzero_weighted.sum(axis=axis))

    assert_raises(TypeError, count_nonzero, X_csc)
    assert_raises(ValueError, count_nonzero, X_csr, axis=2)
    assert (count_nonzero(X_csr, axis=0).dtype ==
            count_nonzero(X_csr, axis=1).dtype)
    assert (count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype ==
            count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype)

    # Check dtypes with large sparse matrices too
    # XXX: test fails on Appveyor (python2.7 32bit)
    try:
        X_csr.indices = X_csr.indices.astype(np.int64)
        X_csr.indptr = X_csr.indptr.astype(np.int64)
        assert (count_nonzero(X_csr, axis=0).dtype ==
                count_nonzero(X_csr, axis=1).dtype)
        assert (count_nonzero(X_csr, axis=0,
                              sample_weight=sample_weight).dtype ==
                count_nonzero(X_csr, axis=1,
                              sample_weight=sample_weight).dtype)
    except TypeError as e:
        if ("according to the rule 'safe'" in e.args[0] and
                np.intp().nbytes < 8):
            pass
        else:
            raise
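# --- Reference sketch (not part of the original source). ---
# A minimal dense rendition of the weighted count_nonzero semantics these
# tests assert; `count_nonzero_ref` is a hypothetical helper for
# illustration, not sklearn's CSR implementation.
import numpy as np

def count_nonzero_ref(X_sparse, axis=None, sample_weight=None):
    """Dense reference: count (optionally row-weighted) nonzero entries."""
    mask = (X_sparse.toarray() != 0).astype(np.float64)
    if sample_weight is not None:
        # One weight per row; each nonzero contributes its row's weight.
        mask *= np.asarray(sample_weight)[:, None]
    return mask.sum(axis=axis)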
def accuracy_rounding_score(y_true, y_pred, normalize=True,
                            sample_weight=None):
    # Snap each prediction to the nearest value in the fixed label grid.
    labels = [500., 1000., 1500., 2000., 2500., 3000., 3500., 4000., 4500.,
              5000., 6000., 7000., 8000., 9000., 10000., 12500., 15000.]
    for p in range(y_pred.size):
        y_pred[p] = min(labels, key=lambda x: abs(x - y_pred[p]))

    # Compute accuracy for each possible representation
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    check_consistent_length(y_true, y_pred, sample_weight)
    if y_type.startswith('multilabel'):
        differing_labels = count_nonzero(y_true - y_pred, axis=1)
        score = differing_labels == 0
    else:
        score = y_true == y_pred

    return _weighted_sum(score, sample_weight, normalize)
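# --- Usage sketch (not part of the original source). ---
# Assumes the private sklearn.metrics helpers the scorer imports
# (`_check_targets`, `_weighted_sum`, `count_nonzero`) are in scope.
import numpy as np

y_true = np.array([500., 1000., 12500.])
y_pred = np.array([480., 1130., 11400.])  # snaps to 500., 1000., 12500.
print(accuracy_rounding_score(y_true, y_pred))  # expected 1.0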
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
                                    pos_label=1, average=None,
                                    warn_for=('precision', 'recall',
                                              'f-score'),
                                    sample_weight=None):
    """Compute precision, recall, F-measure and support for each class

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number
    of true positives and ``fp`` the number of false positives. The precision
    is intuitively the ability of the classifier not to label as positive a
    sample that is negative.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive
    samples.

    The F-beta score can be interpreted as a weighted harmonic mean of the
    precision and recall, where an F-beta score reaches its best value at 1
    and worst score at 0.

    The F-beta score weights recall more than precision by a factor of
    ``beta``. ``beta == 1.0`` means recall and precision are equally
    important.

    The support is the number of occurrences of each class in ``y_true``.

    If ``pos_label is None`` and in binary classification, this function
    returns the average precision, recall and F-measure if ``average`` is one
    of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta : float, 1.0 by default
        The strength of recall versus precision in the F-score.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        If the data are multiclass or multilabel, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will
        report scores for that label only.

    average : string, [None (default), 'binary', 'micro', 'macro', \
                       'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are
            binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average,
            weighted by support (the number of true instances for each
            label). This alters 'macro' to account for label imbalance; it
            can result in an F-score that is not between precision and
            recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average
            (only meaningful for multilabel classification where this
            differs from :func:`accuracy_score`).
    warn_for : tuple or set, for internal use
        This determines which warnings will be made in the case that this
        function is being used to return only one of its metrics.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    support : int (if average is not None) or array of int, shape =\
        [n_unique_labels]
        The number of occurrences of each label in ``y_true``.

    References
    ----------
    .. [1] `Wikipedia entry for the Precision and recall
           <https://en.wikipedia.org/wiki/Precision_and_recall>`_

    .. [2] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_

    .. [3] `Discriminative Methods for Multi-labeled Classification Advances
           in Knowledge Discovery and Data Mining (2004), pp. 22-30 by
           Shantanu Godbole, Sunita Sarawagi
           <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_

    Examples
    --------
    >>> from sklearn.metrics import precision_recall_fscore_support
    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    >>> precision_recall_fscore_support(y_true, y_pred, average='macro')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='micro')
    ... # doctest: +ELLIPSIS
    (0.33..., 0.33..., 0.33..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)

    It is possible to compute per-label precisions, recalls, F1-scores and
    supports instead of averaging:

    >>> precision_recall_fscore_support(y_true, y_pred, average=None,
    ... labels=['pig', 'dog', 'cat'])
    ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
    (array([0. , 0. , 0.66...]), array([0., 0., 1.]),
     array([0. , 0. , 0.8]), array([2, 2, 2]))
    """
    average_options = (None, 'micro', 'macro', 'weighted', 'samples')
    if average not in average_options and average != 'binary':
        raise ValueError('average has to be one of ' + str(average_options))
    if beta <= 0:
        raise ValueError("beta should be >0 in the F-beta score")

    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    check_consistent_length(y_true, y_pred, sample_weight)
    present_labels = unique_labels(y_true, y_pred)

    if average == 'binary':
        if y_type == 'binary':
            if pos_label not in present_labels:
                if len(present_labels) < 2:
                    # Only negative labels
                    return (0., 0., 0., 0)
                else:
                    raise ValueError("pos_label=%r is not a valid label: %r"
                                     % (pos_label, present_labels))
            labels = [pos_label]
        else:
            raise ValueError("Target is %s but average='binary'. Please "
                             "choose another average setting." % y_type)
    elif pos_label not in (None, 1):
        warnings.warn("Note that pos_label (set to %r) is ignored when "
                      "average != 'binary' (got %r). You may use "
                      "labels=[pos_label] to specify a single positive "
                      "class." % (pos_label, average), UserWarning)

    if labels is None:
        labels = present_labels
        n_labels = None
    else:
        n_labels = len(labels)
        labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
                                                 assume_unique=True)])

    # Calculate tp_sum, pred_sum, true_sum ###
    if y_type.startswith('multilabel'):
        sum_axis = 1 if average == 'samples' else 0

        # All labels are index integers for multilabel.
        # Select labels:
        if not np.all(labels == present_labels):
            if np.max(labels) > np.max(present_labels):
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d > %d' %
                                 (np.max(labels), np.max(present_labels)))
            if np.min(labels) < 0:
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d < 0' % np.min(labels))

        if n_labels is not None:
            y_true = y_true[:, labels[:n_labels]]
            y_pred = y_pred[:, labels[:n_labels]]

        # calculate weighted counts
        true_and_pred = y_true.multiply(y_pred)
        tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
                               sample_weight=sample_weight)
        pred_sum = count_nonzero(y_pred, axis=sum_axis,
                                 sample_weight=sample_weight)
        true_sum = count_nonzero(y_true, axis=sum_axis,
                                 sample_weight=sample_weight)

    elif average == 'samples':
        raise ValueError("Sample-based precision, recall, fscore is "
                         "not meaningful outside multilabel "
                         "classification. See the accuracy_score instead.")
    else:
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        y_pred = le.transform(y_pred)
        sorted_labels = le.classes_

        # labels are now from 0 to len(labels) - 1 -> use bincount
        tp = y_true == y_pred
        tp_bins = y_true[tp]
        if sample_weight is not None:
            tp_bins_weights = np.asarray(sample_weight)[tp]
        else:
            tp_bins_weights = None

        if len(tp_bins):
            tp_sum = np.bincount(tp_bins, weights=tp_bins_weights,
                                 minlength=len(labels))
        else:
            # Pathological case
            true_sum = pred_sum = tp_sum = np.zeros(len(labels))
        if len(y_pred):
            pred_sum = np.bincount(y_pred, weights=sample_weight,
                                   minlength=len(labels))
        if len(y_true):
            true_sum = np.bincount(y_true, weights=sample_weight,
                                   minlength=len(labels))

        # Retain only selected labels
        indices = np.searchsorted(sorted_labels, labels[:n_labels])
        tp_sum = tp_sum[indices]
        true_sum = true_sum[indices]
        pred_sum = pred_sum[indices]

    if average == 'micro':
        tp_sum = np.array([tp_sum.sum()])
        pred_sum = np.array([pred_sum.sum()])
        true_sum = np.array([true_sum.sum()])

    # Finally, we have all our sufficient statistics. Divide! #
    beta2 = beta ** 2
    with np.errstate(divide='ignore', invalid='ignore'):
        # Divide, and on zero-division, set scores to 0 and warn:

        # Oddly, we may get an "invalid" rather than a "divide" error
        # here.
        precision = _prf_divide(tp_sum, pred_sum,
                                'precision', 'predicted', average, warn_for)
        recall = _prf_divide(tp_sum, true_sum,
                             'recall', 'true', average, warn_for)
        # Don't need to warn for F: either P or R warned, or tp == 0 where
        # pos and true are nonzero, in which case, F is well-defined and zero
        f_score = ((1 + beta2) * precision * recall /
                   (beta2 * precision + recall))
        f_score[tp_sum == 0] = 0.0

    # Average the results
    if average == 'weighted':
        weights = true_sum
        if weights.sum() == 0:
            return 0, 0, 0, None
    elif average == 'samples':
        weights = sample_weight
    else:
        weights = None

    if average is not None:
        assert average != 'binary' or len(precision) == 1
        precision = np.average(precision, weights=weights)
        recall = np.average(recall, weights=weights)
        f_score = np.average(f_score, weights=weights)
        true_sum = None  # return no support

    return precision, recall, f_score, true_sum
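# --- Sanity sketch (not part of the original source). ---
# The averaging step above is just np.average with the chosen weights, so
# the docstring's 'macro' results can be recovered from the per-class
# scores via the public sklearn API.
import numpy as np
from sklearn.metrics import precision_recall_fscore_support

y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
per_class = precision_recall_fscore_support(y_true, y_pred, average=None,
                                            labels=['cat', 'dog', 'pig'])
macro = precision_recall_fscore_support(y_true, y_pred, average='macro')
# 'macro' is the unweighted mean of the per-class values.
assert np.isclose(per_class[0].mean(), macro[0])  # precision: 0.22...
assert np.isclose(per_class[1].mean(), macro[1])  # recall:    0.33...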
def multilabel_confusion_matrix(y_true, y_pred, sample_weight=None,
                                labels=None, samplewise=False):
    """Compute a confusion matrix for each class or sample

    .. versionadded:: 0.21

    Compute class-wise (default) or sample-wise (samplewise=True) multilabel
    confusion matrix to evaluate the accuracy of a classification, and output
    confusion matrices for each class or sample.

    In multilabel confusion matrix :math:`MCM`, the count of true negatives
    is :math:`MCM_{:,0,0}`, false negatives is :math:`MCM_{:,1,0}`,
    true positives is :math:`MCM_{:,1,1}` and false positives is
    :math:`MCM_{:,0,1}`.

    Multiclass data will be treated as if binarized under a one-vs-rest
    transformation. Returned confusion matrices will be in the order of
    sorted unique labels in the union of (y_true, y_pred).

    Read more in the :ref:`User Guide <multilabel_confusion_matrix>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        of shape (n_samples, n_outputs) or (n_samples,)
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        of shape (n_samples, n_outputs) or (n_samples,)
        Estimated targets as returned by a classifier.

    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.

    labels : array-like
        A list of classes or column indices to select some (or to force
        inclusion of classes absent from the data).

    samplewise : bool, default=False
        In the multilabel case, this calculates a confusion matrix per
        sample.

    Returns
    -------
    multi_confusion : array, shape (n_outputs, 2, 2)
        A 2x2 confusion matrix corresponding to each output in the input.
        When calculating class-wise multi_confusion (default), then
        n_outputs = n_labels; when calculating sample-wise multi_confusion
        (samplewise=True), n_outputs = n_samples. If ``labels`` is defined,
        the results will be returned in the order specified in ``labels``,
        otherwise the results will be returned in sorted order by default.

    See also
    --------
    confusion_matrix

    Notes
    -----
    The multilabel_confusion_matrix calculates class-wise or sample-wise
    multilabel confusion matrices, and in multiclass tasks, labels are
    binarized under a one-vs-rest way; while confusion_matrix calculates
    one confusion matrix for confusion between every two classes.

    Examples
    --------

    Multilabel-indicator case:

    >>> import numpy as np
    >>> from sklearn.metrics import multilabel_confusion_matrix
    >>> y_true = np.array([[1, 0, 1],
    ...                    [0, 1, 0]])
    >>> y_pred = np.array([[1, 0, 0],
    ...                    [0, 1, 1]])
    >>> multilabel_confusion_matrix(y_true, y_pred)
    array([[[1, 0],
            [0, 1]],
    <BLANKLINE>
           [[1, 0],
            [0, 1]],
    <BLANKLINE>
           [[0, 1],
            [1, 0]]])

    Multiclass case:

    >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
    >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
    >>> multilabel_confusion_matrix(y_true, y_pred,
labels=["ant", "bird", "cat"]) array([[[3, 1], [0, 2]], <BLANKLINE> [[5, 0], [1, 0]], <BLANKLINE> [[2, 1], [1, 2]]]) """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if sample_weight is not None: sample_weight = column_or_1d(sample_weight) check_consistent_length(y_true, y_pred, sample_weight) if y_type not in ("binary", "multiclass", "multilabel-indicator"): raise ValueError("%s is not supported" % y_type) present_labels = unique_labels(y_true, y_pred) if labels is None: labels = present_labels n_labels = None else: n_labels = len(labels) labels = np.hstack( [labels, np.setdiff1d(present_labels, labels, assume_unique=True)]) if y_true.ndim == 1: if samplewise: raise ValueError("Samplewise metrics are not available outside of " "multilabel classification.") le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) y_pred = le.transform(y_pred) sorted_labels = le.classes_ # labels are now from 0 to len(labels) - 1 -> use bincount tp = y_true == y_pred tp_bins = y_true[tp] if sample_weight is not None: tp_bins_weights = np.asarray(sample_weight)[tp] else: tp_bins_weights = None if len(tp_bins): tp_sum = np.bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels)) else: # Pathological case true_sum = pred_sum = tp_sum = np.zeros(len(labels)) if len(y_pred): pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels)) if len(y_true): true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels)) # Retain only selected labels indices = np.searchsorted(sorted_labels, labels[:n_labels]) tp_sum = tp_sum[indices] true_sum = true_sum[indices] pred_sum = pred_sum[indices] else: sum_axis = 1 if samplewise else 0 # All labels are index integers for multilabel. # Select labels: if not np.array_equal(labels, present_labels): if np.max(labels) > np.max(present_labels): raise ValueError('All labels must be in [0, n labels) for ' 'multilabel targets. ' 'Got %d > %d' % (np.max(labels), np.max(present_labels))) if np.min(labels) < 0: raise ValueError('All labels must be in [0, n labels) for ' 'multilabel targets. ' 'Got %d < 0' % np.min(labels)) if n_labels is not None: y_true = y_true[:, labels[:n_labels]] y_pred = y_pred[:, labels[:n_labels]] # calculate weighted counts true_and_pred = y_true.multiply(y_pred) tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight) pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) fp = pred_sum - tp_sum fn = true_sum - tp_sum tp = tp_sum if sample_weight is not None and samplewise: sample_weight = np.array(sample_weight) tp = np.array(tp) fp = np.array(fp) fn = np.array(fn) tn = sample_weight * y_true.shape[1] - tp - fp - fn elif sample_weight is not None: tn = sum(sample_weight) - tp - fp - fn elif samplewise: tn = y_true.shape[1] - tp - fp - fn else: tn = y_true.shape[0] - tp - fp - fn return np.array([tn, fp, fn, tp]).T.reshape(-1, 2, 2)
def label_ranking_loss(y_true, y_score, sample_weight=None):
    """Compute Ranking loss measure

    Compute the average number of label pairs that are incorrectly ordered
    given y_score weighted by the size of the label set and the number of
    labels not in the label set.

    This is similar to the error set size, but weighted by the number of
    relevant and irrelevant labels. The best performance is achieved with
    a ranking loss of zero.

    Read more in the :ref:`User Guide <label_ranking_loss>`.

    .. versionadded:: 0.17
       A function *label_ranking_loss*

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or non-thresholded measure of decisions
        (as returned by "decision_function" on some classifiers).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type not in ("multilabel-indicator",):
        raise ValueError("{0} format is not supported".format(y_type))

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    n_samples, n_labels = y_true.shape

    y_true = csr_matrix(y_true)

    loss = np.zeros(n_samples)
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Sort and bin the label scores
        unique_scores, unique_inverse = np.unique(y_score[i],
                                                  return_inverse=True)
        true_at_reversed_rank = np.bincount(
            unique_inverse[y_true.indices[start:stop]],
            minlength=len(unique_scores))
        all_at_reversed_rank = np.bincount(unique_inverse,
                                           minlength=len(unique_scores))
        false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank

        # if the scores are sorted, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
        # how many false labels of a given score have a score higher than
        # the accumulated true labels with lower scores.
        loss[i] = np.dot(true_at_reversed_rank.cumsum(),
                         false_at_reversed_rank)

    n_positives = count_nonzero(y_true, axis=1)
    with np.errstate(divide="ignore", invalid="ignore"):
        loss /= ((n_labels - n_positives) * n_positives)

    # When there is no positive or no negative labels, those values should
    # be considered as correct, i.e. the ranking doesn't matter.
    loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.

    return np.average(loss, weights=sample_weight)
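# --- Usage check (not part of the original source). ---
# This mirrors the example in the scikit-learn documentation for
# label_ranking_loss, using the public API.
import numpy as np
from sklearn.metrics import label_ranking_loss

y_true = np.array([[1, 0, 0], [0, 0, 1]])
y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
# Sample 1 mis-orders 1 of its 2 (true, false) pairs -> 0.5;
# sample 2 mis-orders both pairs -> 1.0; average = 0.75.
print(label_ranking_loss(y_true, y_score))  # 0.75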