def pearson_spearman_scorer(
    golds: ndarray,
    probs: ndarray,
    preds: Optional[ndarray],
    uids: Optional[List[str]] = None,
) -> Dict[str, float]:
    """Compute the mean of the Pearson and Spearman correlations.

    Args:
        golds: Ground truth values.
        probs: Predicted probabilities.
        preds: Predicted values.
        uids: Unique ids, defaults to None.

    Returns:
        A dict mapping "pearson_spearman" to the average of the Pearson
        correlation coefficient and the Spearman rank-order correlation
        coefficient.
    """
    # Delegate to the individual scorers, then average their headline metrics.
    pearson_res = pearson_correlation_scorer(golds, probs, preds, uids)
    spearman_res = spearman_correlation_scorer(golds, probs, preds, uids)
    combined = mean(
        [
            pearson_res["pearson_correlation"],
            spearman_res["spearman_correlation"],
        ]
    )
    return {"pearson_spearman": combined}
def pearson_spearman_scorer(golds, probs, preds, uids=None):
    """Average the Pearson and Spearman rank-order correlation coefficients.

    :param golds: Ground truth (correct) target values.
    :type golds: 1-d np.array
    :param probs: Predicted target probabilities.
    :type probs: 1-d np.array
    :param preds: Predicted target values. (Not used!)
    :type preds: 1-d np.array
    :param uids: Unique ids.
    :type uids: list
    :return: The average of the Pearson correlation coefficient and the
        Spearman rank-order correlation coefficient, keyed by
        "pearson_spearman".
    :rtype: dict
    """
    # Score with both individual correlation scorers, then average the
    # two headline values into a single combined metric.
    pearson_metrics = pearson_correlation_scorer(golds, probs, preds, uids)
    spearman_metrics = spearman_correlation_scorer(golds, probs, preds, uids)
    averaged = np.mean(
        [
            pearson_metrics["pearson_correlation"],
            spearman_metrics["spearman_correlation"],
        ]
    )
    return {"pearson_spearman": averaged}
def test_pearson_correlation(caplog):
    """Unit test of pearson_correlation_scorer."""
    caplog.set_level(logging.INFO)

    # Without p-value: only the correlation coefficient is reported.
    result = pearson_correlation_scorer(GOLDS, UNARY_PROBS, None)
    assert isequal(result, {"pearson_correlation": 0.5667402091575048})

    # With return_pvalue=True the p-value is reported alongside.
    result = pearson_correlation_scorer(GOLDS, UNARY_PROBS, None, return_pvalue=True)
    expected = {
        "pearson_correlation": 0.5667402091575048,
        "pearson_pvalue": 0.24090659530906683,
    }
    assert isequal(result, expected)
def test_pearson_correlation(caplog):
    """Unit test of pearson_correlation_scorer"""
    caplog.set_level(logging.INFO)

    labels = np.array([1, 0, 1, 0, 1, 0])
    scores = np.array([0.8, 0.6, 0.9, 0.7, 0.7, 0.2])

    # Default call reports only the correlation coefficient.
    result = pearson_correlation_scorer(labels, scores, None)
    assert isequal(result, {"pearson_correlation": 0.6764814252025461})

    # return_pvalue=True adds the p-value to the metric dict.
    result = pearson_correlation_scorer(labels, scores, None, return_pvalue=True)
    expected = {
        "pearson_correlation": 0.6764814252025461,
        "pearson_pvalue": 0.14006598491201777,
    }
    assert isequal(result, expected)