def precision(self, labels, pred_scores):
    """
    Compute the precision.

    Parameters
    ----------
    labels: value list. The labels of the data set.
    pred_scores: value list. The predicted scores of the model, one score
                 per entry in labels.

    Returns
    ----------
    For the 'binary' eval type, a tuple (metric_scores, cuts,
    score_threshold) describing the precision at each score threshold.
    For the multi-class eval type, the result of
    MultiClassPrecision.compute(labels, pred_scores).
    """
    if self.eval_type == consts.BINARY:
        precision_operator = classification_metric.BiClassPrecision()
        metric_scores, score_threshold, cuts = precision_operator.compute(
            labels, pred_scores)
        return metric_scores, cuts, score_threshold
    elif self.eval_type == consts.MULTY:
        precision_operator = classification_metric.MultiClassPrecision()
        return precision_operator.compute(labels, pred_scores)
    else:
        logging.warning(
            "error: can not find classification type: {}".format(
                self.eval_type))
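# A hedged usage sketch for precision(), assuming MetricInterface takes
# (pos_label, eval_type) as in the test script below; in the binary case the
# method returns (metric_scores, cuts, score_threshold). A sketch, not a
# definitive API reference.
import numpy as np
from federatedml.evaluation.metric_interface import MetricInterface

scores = np.random.random(50)
labels = (scores > 0.5) + 0
interface = MetricInterface(1, 'binary')
metric_scores, cuts, score_threshold = interface.precision(labels, scores)
print(metric_scores[0], score_threshold[0])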
def quantile_pr(self, labels, pred_scores):
    """
    Compute precision and recall at quantile-based score thresholds.
    Only supported for the 'binary' eval type.
    """
    if self.eval_type == consts.BINARY:
        p = classification_metric.BiClassPrecision(
            cut_method='quantile', remove_duplicate=False)
        r = classification_metric.BiClassRecall(
            cut_method='quantile', remove_duplicate=False)
        p_scores, score_threshold, cuts = p.compute(labels, pred_scores)
        r_scores, score_threshold, cuts = r.compute(labels, pred_scores)
        # Reverse so that results run from the highest threshold to the lowest.
        p_scores = list(map(list, np.flip(p_scores, axis=0)))
        r_scores = list(map(list, np.flip(r_scores, axis=0)))
        score_threshold = list(np.flip(score_threshold))
        return p_scores, r_scores, score_threshold
    else:
        logging.warning(
            'error: pr quantile is for binary classification only')
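# A minimal sketch for quantile_pr() under the same assumptions as the
# precision sketch above: it returns per-threshold precision scores, recall
# scores, and the quantile-based thresholds, reversed to run high-to-low.
import numpy as np
from federatedml.evaluation.metric_interface import MetricInterface

scores = np.random.random(50)
labels = (scores > 0.5) + 0
interface = MetricInterface(1, 'binary')
p_scores, r_scores, thresholds = interface.quantile_pr(labels, scores)
for prec, rec, t in zip(p_scores, r_scores, thresholds):
    print(t, prec, rec)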
from federatedml.evaluation.metrics.classification_metric import (
    BiClassAccuracy, BiClassRecall, BiClassPrecision, KS, Lift, Gain, FScore)
from federatedml.evaluation.metric_interface import MetricInterface
from federatedml.evaluation.metrics import classification_metric
import numpy as np
from federatedml.evaluation.backup.evaluation import (
    BiClassPrecision as BiClassPrecision2)

# Build a random binary data set: uniform scores and labels derived from a
# 0.5 cut, so both classes are present.
scores = np.random.random(100)
labels = (scores > 0.5) + 0

interface = MetricInterface(1, 'binary')
mat = interface.confusion_mat(labels, scores)

rs = classification_metric.ThresholdCutter.cut_by_quantile(scores)
rs2 = classification_metric.BiClassRecall(cut_method='quantile').compute(
    labels, scores)
rs3 = classification_metric.BiClassPrecision(cut_method='quantile').compute(
    labels, scores)

# Cross-check the current BiClassPrecision against the backup implementation,
# reusing the thresholds produced by the recall computation.
rs4 = BiClassPrecision2().compute(labels, scores, rs2[1])
comp1 = [i[1] for i in rs3[0]]
comp2 = [i[1] for i in rs4[0]]
print(comp1 == comp2)
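# The comparison above tests float lists for exact equality; a tolerance-based
# check is more robust. This assumes, as the exact comparison already does,
# that both implementations emit scores in the same per-threshold order.
print(np.allclose(comp1, comp2))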