Example #1
def get_pearson_r(res):
    """
    :param res: results of evaluation done with learners wrapped into
        :class:`Orange.evaluation.reliability.Classifier`.
    :type res: :class:`Orange.evaluation.testing.ExperimentResults`

    Return Pearson's coefficient between the prediction error and each of the
    used reliability estimates. Also, return the p-value of each of
    the coefficients.
    """
    prediction_error = get_prediction_error_list(res)
    results = []
    for i in xrange(len(res.results[0].probabilities[0].reliability_estimate)):
        reliability_estimate, signed_or_absolute, method = get_reliability_estimation_list(
            res, i)
        try:
            if signed_or_absolute == SIGNED:
                r, p = statc.pearsonr(prediction_error, reliability_estimate)
            else:
                r, p = statc.pearsonr([abs(pe) for pe in prediction_error],
                                      reliability_estimate)
        except Exception:
            r = p = float("NaN")
        results.append((r, p, signed_or_absolute, method))
    return results
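
For context, a minimal usage sketch of how a result object of this shape is typically produced and scored. The kNN learner, the "housing" dataset, and the Learner/cross_validation calls follow the usual Orange 2.x reliability pattern and should be treated as assumptions, not as part of the example above.

# A minimal usage sketch, assuming the Orange 2.x reliability API; the learner,
# dataset, and wrapper calls below are illustrative assumptions.
import Orange

housing = Orange.data.Table("housing")
knn = Orange.classification.knn.kNNLearner()
reliability = Orange.evaluation.reliability.Learner(knn)

res = Orange.evaluation.testing.cross_validation([reliability], housing)

# One (r, p, SIGNED/ABSOLUTE flag, method) tuple per reliability estimate.
for r, p, signed_or_absolute, method in Orange.evaluation.reliability.get_pearson_r(res):
    print "method %s: r = %.3f, p = %.3f" % (method, r, p)
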
Example #2
def get_pearson_r_by_iterations(res):
    """
    :param res: results of evaluation done with learners wrapped into
        :class:`Orange.evaluation.reliability.Classifier`.
    :type res: :class:`Orange.evaluation.testing.ExperimentResults`

    Return the average Pearson's coefficient over all folds between the
    prediction error and each of the used reliability estimates.
    """
    results_by_fold = Orange.evaluation.scoring.split_by_iterations(res)
    number_of_estimates = len(
        res.results[0].probabilities[0].reliability_estimate)
    number_of_instances = len(res.results)
    number_of_folds = len(results_by_fold)
    results = [0 for _ in xrange(number_of_estimates)]
    sig = [0 for _ in xrange(number_of_estimates)]
    method_list = [0 for _ in xrange(number_of_estimates)]

    for res in results_by_fold:
        prediction_error = get_prediction_error_list(res)
        for i in xrange(number_of_estimates):
            reliability_estimate, signed_or_absolute, method = get_reliability_estimation_list(
                res, i)
            try:
                if signed_or_absolute == SIGNED:
                    r, _ = statc.pearsonr(prediction_error,
                                          reliability_estimate)
                else:
                    r, _ = statc.pearsonr([abs(pe) for pe in prediction_error],
                                          reliability_estimate)
            except Exception:
                r = float("NaN")
            results[i] += r
            sig[i] = signed_or_absolute
            method_list[i] = method

    # Average the coefficients over folds, then compute their p-values
    results = [float(res) / number_of_folds for res in results]
    ps = [p_value_from_r(r, number_of_instances) for r in results]

    return zip(results, ps, sig, method_list)
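
The averaged coefficients are converted to p-values with p_value_from_r, whose body is not shown in this example. A standard way to obtain a two-sided p-value from an r computed over n instances is the t-transform below; this sketch is an assumption about what that helper does, and it uses scipy rather than Orange's statc.

# A sketch of the two-sided p-value for Pearson's r over n instances; this is
# an assumption about what p_value_from_r computes, using scipy.stats for the
# Student t tail probability instead of Orange's statc module.
# Assumes |r| < 1 and n > 2.
from math import sqrt
from scipy import stats

def p_value_from_r_sketch(r, n):
    df = n - 2                                   # degrees of freedom
    t = r * sqrt(df / ((1.0 - r) * (1.0 + r)))   # t statistic under H0: rho = 0
    return 2.0 * stats.t.sf(abs(t), df)          # two-sided tail probability
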
Example #3
 def __call__(self, e1, e2):
     """
     :param e1: data instances.
     :param e2: data instances.
     
     Return Pearson's dissimilarity between e1 and e2,
     i.e. (1 - r)/2, where r is Pearson's correlation coefficient.
     """
     X1 = []
     X2 = []
     for i in self.indxs:
         if not(e1[i].isSpecial() or e2[i].isSpecial()):
             X1.append(float(e1[i]))
             X2.append(float(e2[i]))
     if not X1:
         return 1.0
     try:
         return (1.0 - statc.pearsonr(X1, X2)[0]) / 2.
     except Exception:
         return 1.0
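
To make the mapping concrete: (1 - r)/2 sends perfectly correlated profiles (r = 1) to 0 and perfectly anti-correlated ones (r = -1) to 1. A small endpoint check, reusing the same statc.pearsonr call as above (importing Orange's bundled statc directly is an assumption about the environment):

# Endpoint check for the (1 - r)/2 mapping; statc is the module used above,
# and importing it directly is an assumption.
import statc

a = [1.0, 2.0, 3.0, 4.0]
b = [2.0, 4.0, 6.0, 8.0]   # r(a, b) = 1, same profile
c = [4.0, 3.0, 2.0, 1.0]   # r(a, c) = -1, opposite profile

print (1.0 - statc.pearsonr(a, b)[0]) / 2.   # ~0.0, maximally similar
print (1.0 - statc.pearsonr(a, c)[0]) / 2.   # ~1.0, maximally dissimilar
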
Example #4
 def __call__(self, e1, e2):
     """
     Return the absolute Pearson's dissimilarity between e1 and e2,
     i.e.
     
     .. math:: 1 - |r|
     
     where r is Pearson's correlation coefficient.
     """
     X1 = []
     X2 = []
     for i in self.indxs:
         if not(e1[i].isSpecial() or e2[i].isSpecial()):
             X1.append(float(e1[i]))
             X2.append(float(e2[i]))
     if not X1:
         return 1.0
     try:
         return (1.0 - abs(statc.pearsonr(X1, X2)[0]))
     except Exception:
         return 1.0
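
The absolute variant is blind to the sign of the correlation: 1 - |r| is 0 for both r = 1 and r = -1, and only approaches 1 as r approaches 0. The same endpoint check with the absolute mapping (again assuming statc can be imported directly):

# Endpoint check for the 1 - |r| mapping; same assumption about statc as above.
import statc

a = [1.0, 2.0, 3.0, 4.0]
b = [2.0, 4.0, 6.0, 8.0]   # r(a, b) = 1
c = [4.0, 3.0, 2.0, 1.0]   # r(a, c) = -1

print 1.0 - abs(statc.pearsonr(a, b)[0])   # ~0.0
print 1.0 - abs(statc.pearsonr(a, c)[0])   # ~0.0, anti-correlation still counts as similar
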