def pearson_spearman_scorer(
    golds: ndarray,
    probs: ndarray,
    preds: Optional[ndarray],
    uids: Optional[List[str]] = None,
) -> Dict[str, float]:
    """Average of Pearson and Spearman rank-order correlation coefficients.

    Args:
      golds: Ground truth values.
      probs: Predicted probabilities.
      preds: Predicted values.
      uids: Unique ids, defaults to None.

    Returns:
      The average of Pearson correlation coefficient and Spearman rank-order
      correlation coefficient.
    """
    # Delegate to the two individual scorers and average their scores.
    pearson = pearson_correlation_scorer(golds, probs, preds, uids)
    spearman = spearman_correlation_scorer(golds, probs, preds, uids)
    average = mean(
        [
            pearson["pearson_correlation"],
            spearman["spearman_correlation"],
        ]
    )
    return {"pearson_spearman": average}
def pearson_spearman_scorer(golds, probs, preds, uids=None):
    """Average of Pearson correlation coefficient and Spearman rank-order
    correlation coefficient.

    :param golds: Ground truth (correct) target values.
    :type golds: 1-d np.array
    :param probs: Predicted target probabilities.
    :type probs: 1-d np.array
    :param preds: Predicted target values. (Not used!)
    :type preds: 1-d np.array
    :param uids: Unique ids.
    :type uids: list
    :return: Pearson correlation coefficient, the p-value and Spearman
        rank-order correlation coefficient and the average.
    :rtype: dict
    """
    # Collect the two underlying correlation scores, then average them.
    scores = [
        pearson_correlation_scorer(golds, probs, preds, uids)["pearson_correlation"],
        spearman_correlation_scorer(golds, probs, preds, uids)["spearman_correlation"],
    ]
    return {"pearson_spearman": np.mean(scores)}
def test_spearman_correlation(caplog):
    """Unit test of spearman_correlation_scorer."""
    caplog.set_level(logging.INFO)

    # Default call: correlation only, no p-value in the result.
    result = spearman_correlation_scorer(GOLDS, UNARY_PROBS, None)
    assert isequal(result, {"spearman_correlation": 0.5940885257860046})

    # With return_pvalue=True the p-value is included as well.
    result = spearman_correlation_scorer(
        GOLDS, UNARY_PROBS, None, return_pvalue=True
    )
    expected = {
        "spearman_correlation": 0.5940885257860046,
        "spearman_pvalue": 0.21370636293028789,
    }
    assert isequal(result, expected)
def test_spearman_correlation(caplog):
    """Unit test of spearman_correlation_scorer"""
    caplog.set_level(logging.INFO)

    # Small hand-crafted binary gold labels and unary probabilities.
    golds = np.array([1, 0, 1, 0, 1, 0])
    probs = np.array([0.8, 0.6, 0.9, 0.7, 0.7, 0.2])

    # Default call: correlation only.
    assert isequal(
        spearman_correlation_scorer(golds, probs, None),
        {"spearman_correlation": 0.7921180343813395},
    )

    # With return_pvalue=True the p-value is reported too.
    assert isequal(
        spearman_correlation_scorer(golds, probs, None, return_pvalue=True),
        {
            "spearman_correlation": 0.7921180343813395,
            "spearman_pvalue": 0.06033056705743058,
        },
    )