Example #1
    def test_confusion_matrix(self):
        anno1 = np.array([0, 0, 1, 1, 2, 3])
        anno2 = np.array([0, 1, 1, 1, 2, 2])
        expected = np.array([[1, 1, 0, 0], [0, 2, 0, 0], [0, 0, 1, 0],
                             [0, 0, 1, 0]])
        cm = pmh.confusion_matrix(anno1, anno2, 4)
        np.testing.assert_array_equal(cm, expected)
Example #2
    def test_confusion_matrix_missing(self):
        """Test confusion matrix with missing data."""
        anno1 = np.array([0, 0, 1, 1, MV, 3])
        anno2 = np.array([0, MV, 1, 1, 2, 2])
        expected = np.array([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0],
                             [0, 0, 1, 0]])
        cm = pmh.confusion_matrix(anno1, anno2, 4)
        np.testing.assert_array_equal(cm, expected)
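
The two tests above pin down the expected behavior of pmh.confusion_matrix: entry (i, j) counts the items labeled i by the first annotator and j by the second, and any item for which either annotation is missing is skipped. The following is a minimal sketch consistent with those expectations, not pyanno's implementation; it assumes the missing-value marker MV (pyanno.util.MISSING_VALUE) is a negative integer sentinel.

import numpy as np

MV = -1  # assumed stand-in for pyanno.util.MISSING_VALUE

def confusion_matrix_sketch(anno1, anno2, nclasses):
    """Count label pairs, skipping items where either annotation is missing."""
    cm = np.zeros((nclasses, nclasses), dtype=float)
    valid = (anno1 != MV) & (anno2 != MV)
    for i, j in zip(anno1[valid], anno2[valid]):
        cm[i, j] += 1
    return cm

# Reproduces the expected matrix from the missing-data test above:
cm = confusion_matrix_sketch(np.array([0, 0, 1, 1, MV, 3]),
                             np.array([0, MV, 1, 1, 2, 2]), 4)
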
Example #3
def cohens_weighted_kappa(annotations1, annotations2, weights_func=diagonal_distance, nclasses=None):
    """Compute Cohen's weighted kappa for two annotators.

    Assumes that the annotators draw annotations at random with different
    but constant frequencies. Disagreements are weighted by weights w_ij
    representing the "seriousness" of the disagreement. For ordered codes,
    the weight is often set to the distance from the diagonal, i.e.
    `w_ij = |i-j|`.

    When w_ij is 0.0 on the diagonal and 1.0 elsewhere,
    Cohen's weighted kappa is equivalent to Cohen's kappa.

    See also:
    :func:`~pyanno.measures.distances.diagonal_distance`,
    :func:`~pyanno.measures.distances.binary_distance`,
    :func:`~pyanno.measures.agreement.cohens_kappa`,
    :func:`~pyanno.measures.helpers.pairwise_matrix`

    **References:**

    * Cohen, J. (1968). "Weighted kappa: Nominal scale agreement with provision
      for scaled disagreement or partial credit". Psychological Bulletin
      70 (4): 213-220.

    * `Wikipedia entry <http://en.wikipedia.org/wiki/Cohen%27s_kappa>`_

    Arguments
    ---------
    annotations1 : ndarray, shape = (n_items, )
        Array of annotations for a single annotator. Missing values should be
        indicated by :attr:`pyanno.util.MISSING_VALUE`

    annotations2 : ndarray, shape = (n_items, )
        Array of annotations for a single annotator. Missing values should be
        indicated by :attr:`pyanno.util.MISSING_VALUE`

    weights_func : function(m_i, m_j)
        Weights function that receives two matrices of indices
        i, j and returns the matrix of weights between them.
        Default is :func:`~pyanno.measures.distances.diagonal_distance`

    nclasses : int
        Number of annotation classes. If None, `nclasses` is inferred from the
        values in the annotations

    Returns
    -------
    stat : float
        The value of the statistic.
    """

    if all_invalid(annotations1, annotations2):
        logger.debug("No valid annotations")
        return np.nan

    if nclasses is None:
        nclasses = compute_nclasses(annotations1, annotations2)

    # observed probability of each combination of annotations
    observed_freq = confusion_matrix(annotations1, annotations2, nclasses)
    observed_freq_sum = observed_freq.sum()
    if observed_freq_sum == 0:
        return np.nan

    observed_freq /= observed_freq_sum

    # expected probability of each combination of annotations if annotators
    # draw annotations at random with different but constant frequencies
    freq1 = labels_frequency(annotations1, nclasses)
    freq2 = labels_frequency(annotations2, nclasses)
    chance_freq = np.outer(freq1, freq2)

    # build weights matrix from weights function
    weights = np.fromfunction(weights_func, shape=(nclasses, nclasses), dtype=float)

    kappa = 1.0 - (weights * observed_freq).sum() / (weights * chance_freq).sum()

    return kappa
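
For reference, the value returned above is one minus the ratio of the weighted observed disagreement to the weighted disagreement expected by chance. Below is a small usage sketch, not taken from the pyanno sources; the import paths are assumptions based on the docstring's "See also" cross-references (cohens_weighted_kappa is taken to sit alongside cohens_kappa in pyanno.measures.agreement).

import numpy as np

# Assumed import locations, inferred from the "See also" section above.
from pyanno.measures.agreement import cohens_kappa, cohens_weighted_kappa
from pyanno.measures.distances import binary_distance

anno1 = np.array([0, 1, 1, 2, 3, 3, 2, 1])
anno2 = np.array([0, 1, 2, 2, 3, 2, 2, 1])

# Default weights: diagonal distance, w_ij = |i - j|.
k_weighted = cohens_weighted_kappa(anno1, anno2)

# With 0/1 weights the statistic should reduce to ordinary Cohen's kappa,
# as stated in the docstring.
k_binary = cohens_weighted_kappa(anno1, anno2, weights_func=binary_distance)
assert np.isclose(k_binary, cohens_kappa(anno1, anno2))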