Example 1
import logging

import numpy as np


# precision_scorer and isequal are provided by the library under test and its
# test helpers; their import lines are omitted in this snippet.
def test_precision(caplog):
    """Unit test of precision_scorer."""

    caplog.set_level(logging.INFO)

    golds = np.array([0, 1, 0, 1, 0, 1])
    preds = np.array([0, 0, 0, 0, 0, 1])

    metric_dict = precision_scorer(golds, None, preds, pos_label=1)
    assert isequal(metric_dict, {"precision": 1})

    metric_dict = precision_scorer(golds, None, preds, pos_label=0)
    assert isequal(metric_dict, {"precision": 0.6})
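The implementation of precision_scorer is not shown here. A minimal sketch consistent with the asserted values, assuming it computes standard one-vs-rest precision against the chosen pos_label, could look like this:

from typing import Dict, List, Optional

import numpy as np


def precision_scorer(
    golds: np.ndarray,
    probs: Optional[np.ndarray],
    preds: np.ndarray,
    uids: Optional[List[str]] = None,
    pos_label: int = 1,
) -> Dict[str, float]:
    """Precision = TP / (TP + FP), measured against pos_label."""
    predicted_pos = preds == pos_label
    true_pos = np.sum((golds == pos_label) & predicted_pos)
    denom = np.sum(predicted_pos)
    return {"precision": float(true_pos / denom) if denom > 0 else 0.0}

With the data above, pos_label=1 gives 1/1 = 1.0 (the single positive prediction is correct) and pos_label=0 gives 3/5 = 0.6 (three of the five zero predictions match the golds), matching the assertions.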
Example 2
# precision_scorer and recall_scorer come from the same metrics package; their
# import lines are omitted in this snippet.
def fbeta_scorer(golds, probs, preds, uids=None, pos_label=1, beta=1):
    """F-beta score is the weighted harmonic mean of precision and recall.

    :param golds: Ground truth (correct) target values.
    :type golds: 1-d np.array
    :param probs: Predicted target probabilities. (Not used!)
    :type probs: k-d np.array
    :param preds: Predicted target values.
    :type preds: 1-d np.array
    :param uids: Unique ids.
    :type uids: list
    :param pos_label: The positive class label, defaults to 1.
    :type pos_label: int, optional
    :param beta: Weight of precision in harmonic mean, defaults to 1.
    :type beta: float, optional
    :return: F-beta score.
    :rtype: dict
    """

    precision = precision_scorer(golds, probs, preds, uids,
                                 pos_label)["precision"]
    recall = recall_scorer(golds, probs, preds, uids, pos_label)["recall"]

    fbeta = ((1 + beta**2) * (precision * recall) /
             ((beta**2 * precision) + recall) if
             (beta**2 * precision) + recall > 0 else 0.0)

    return {f"f{beta}": fbeta}
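As a quick worked example (assuming precision_scorer and recall_scorer compute standard per-class precision and recall), running fbeta_scorer on the data from Example 1 gives precision 1 and recall 1/3 for pos_label=1:

import numpy as np

golds = np.array([0, 1, 0, 1, 0, 1])
preds = np.array([0, 0, 0, 0, 0, 1])

# f1 = (1 + 1) * (1.0 * 1/3) / (1.0 + 1/3) = 0.5
fbeta_scorer(golds, None, preds, pos_label=1, beta=1)  # -> {"f1": 0.5}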
Example 3
from typing import Dict, List, Optional

from numpy import ndarray


# precision_scorer and recall_scorer come from the same metrics package;
# their import lines are omitted in this snippet.
def fbeta_scorer(
    golds: ndarray,
    probs: Optional[ndarray],
    preds: ndarray,
    uids: Optional[List[str]] = None,
    pos_label: int = 1,
    beta: float = 1,
) -> Dict[str, float]:
    """F-beta score is the weighted harmonic mean of precision and recall.

    Args:
      golds (ndarray): Ground truth values.
      probs (ndarray or None): Predicted probabilities.
      preds (ndarray): Predicted values.
      uids (list, optional): Unique ids, defaults to None.
      pos_label (int, optional): The positive class label, defaults to 1.
      beta (float, optional): Weight of precision in harmonic mean, defaults to 1.

    Returns:
      dict: F-beta score.

    """

    precision = precision_scorer(golds, probs, preds, uids,
                                 pos_label)["precision"]
    recall = recall_scorer(golds, probs, preds, uids, pos_label)["recall"]

    fbeta = ((1 + beta**2) * (precision * recall) /
             ((beta**2 * precision) + recall) if
             (beta**2 * precision) + recall > 0 else 0.0)

    return {f"f{beta}": fbeta}
Example 4
from typing import Dict, List, Optional

from numpy import ndarray


# precision_scorer, recall_scorer, and prob_to_pred come from the same
# package; their import lines are omitted in this snippet.
def fbeta_scorer(
    golds: ndarray,
    probs: Optional[ndarray],
    preds: ndarray,
    uids: Optional[List[str]] = None,
    pos_label: int = 1,
    beta: float = 1,
) -> Dict[str, float]:
    """F-beta score is the weighted harmonic mean of precision and recall.

    Args:
      golds: Ground truth values.
      probs: Predicted probabilities.
      preds: Predicted values.
      uids: Unique ids, defaults to None.
      pos_label: The positive class label, defaults to 1.
      beta: Weight of precision in harmonic mean, defaults to 1.

    Returns:
      F-beta score.
    """
    # Convert probabilistic label to hard label
    if len(golds.shape) == 2:
        golds = prob_to_pred(golds)

    precision = precision_scorer(golds, probs, preds, uids,
                                 pos_label)["precision"]
    recall = recall_scorer(golds, probs, preds, uids, pos_label)["recall"]

    fbeta = ((1 + beta**2) * (precision * recall) /
             ((beta**2 * precision) + recall) if
             (beta**2 * precision) + recall > 0 else 0.0)

    return {f"f{beta}": fbeta}
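This version also accepts probabilistic gold labels (a 2-d array of class probabilities). The prob_to_pred helper is not shown; a minimal sketch, assuming it simply picks the most probable class per row, would be:

import numpy as np


def prob_to_pred(probs: np.ndarray) -> np.ndarray:
    """Convert an (n_samples, n_classes) probability array to hard labels."""
    # The hard label is the index of the highest-probability class in each row.
    return np.argmax(probs, axis=1)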
Example 5
import logging

import numpy as np


# precision_scorer and isequal are imported as in Example 1.
def test_precision(caplog):
    """Unit test of precision_scorer."""

    caplog.set_level(logging.INFO)

    golds = np.array([0, 1, 0, 1, 0, 1])
    gold_probs = np.array([[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8],
                           [0.9, 0.1], [0.4, 0.6]])
    preds = np.array([0, 0, 0, 0, 0, 1])

    metric_dict = precision_scorer(golds, None, preds, pos_label=1)
    assert isequal(metric_dict, {"precision": 1})

    metric_dict = precision_scorer(golds, None, preds, pos_label=0)
    assert isequal(metric_dict, {"precision": 0.6})

    metric_dict = precision_scorer(gold_probs, None, preds, pos_label=1)
    assert isequal(metric_dict, {"precision": 1})

    metric_dict = precision_scorer(gold_probs, None, preds, pos_label=0)
    assert isequal(metric_dict, {"precision": 0.6})
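Note that each row of gold_probs puts the higher probability on the corresponding entry of golds (np.argmax(gold_probs, axis=1) recovers golds), so the probabilistic-gold assertions mirror the hard-label ones.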
Example 6
import logging


# GOLDS, PROBS, PREDS, and PROB_GOLDS are module-level fixtures shared across
# the test module; a plausible definition is sketched after this example.
def test_precision(caplog):
    """Unit test of precision_scorer."""
    caplog.set_level(logging.INFO)

    metric_dict = precision_scorer(GOLDS, PROBS, PREDS, pos_label=1)
    assert isequal(metric_dict, {"precision": 1})

    metric_dict = precision_scorer(GOLDS, None, PREDS, pos_label=1)
    assert isequal(metric_dict, {"precision": 1})

    metric_dict = precision_scorer(GOLDS, None, PREDS, pos_label=0)
    assert isequal(metric_dict, {"precision": 0.6})

    metric_dict = precision_scorer(PROB_GOLDS, PROBS, PREDS, pos_label=1)
    assert isequal(metric_dict, {"precision": 1})

    metric_dict = precision_scorer(PROB_GOLDS, None, PREDS, pos_label=1)
    assert isequal(metric_dict, {"precision": 1})

    metric_dict = precision_scorer(PROB_GOLDS, None, PREDS, pos_label=0)
    assert isequal(metric_dict, {"precision": 0.6})
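The shared fixtures are defined at module level and are not shown in this snippet. A plausible definition, consistent with the values used in the earlier examples (PROBS here is an assumption; any probability array whose rows argmax to PREDS would do), is:

import numpy as np

GOLDS = np.array([0, 1, 0, 1, 0, 1])
PREDS = np.array([0, 0, 0, 0, 0, 1])

# Probabilistic counterparts: each row's argmax matches the hard label.
PROB_GOLDS = np.array([[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8],
                       [0.9, 0.1], [0.4, 0.6]])
PROBS = np.array([[0.9, 0.1], [0.6, 0.4], [0.9, 0.1], [0.6, 0.4],
                  [0.9, 0.1], [0.4, 0.6]])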