Example #1
    def eval(self, predict, labels_map):
        label = labels_map[self._label_name]
        # AUC is undefined when the batch contains only one class.
        if np.sum(label) == 0 or np.sum(label) == label.size:
            return MetricResult(result=float('nan'))
        else:
            auc = roc_auc_score(y_true=label, y_score=predict)
            return MetricResult(result=auc, meta={'#': predict.size})
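
These eval methods belong to a metric class and rely on module-level imports (numpy, sklearn.metrics, collections) plus a MetricResult container defined elsewhere in the framework. As a rough, self-contained sketch of what Example #1 computes, with a hypothetical namedtuple standing in for MetricResult:

    from collections import namedtuple

    import numpy as np
    from sklearn.metrics import roc_auc_score

    # Hypothetical stand-in for the framework's MetricResult container.
    MetricResult = namedtuple('MetricResult', ['result', 'meta'])

    def eval_auc(predict, label):
        # AUC is undefined when the batch contains only one class.
        if np.sum(label) == 0 or np.sum(label) == label.size:
            return MetricResult(result=float('nan'), meta=None)
        auc = roc_auc_score(y_true=label, y_score=predict)
        return MetricResult(result=auc, meta={'#': predict.size})

    # Toy usage:
    print(eval_auc(np.array([0.9, 0.2, 0.7, 0.1]), np.array([1, 0, 1, 0])))
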
Example #2
    def eval(self, predict, labels_map):
        label = labels_map[self._label_name]
        # Skip degenerate batches where every label is 0 or every label is 1.
        if np.sum(label) == 0 or np.sum(label) == label.size:
            return MetricResult(result=float('nan'))
        else:
            mse = mean_squared_error(y_true=label, y_pred=predict)
            return MetricResult(result=mse, meta={'#': predict.size})
Example #3
    def eval(self, predict, labels_map):
        label = labels_map[self._label_name]
        group_key = labels_map[self._group_key_name]
        # Per-sample weights, floored at 1 so every sample contributes.
        weight = np.maximum(labels_map[self._weight_name], 1)
        predict_groups, label_groups = defaultdict(list), defaultdict(list)
        weight_groups = defaultdict(list)
        # Bucket predictions, labels and weights by group key.
        for l, p, key, w in zip(label.flatten(), predict.flatten(),
                                group_key.flatten(), weight.flatten()):
            predict_groups[key].append(p)
            label_groups[key].append(l)
            weight_groups[key].append(w)

        weight_sum, auc_sum = 0, 0
        # The grouped AUC is only computed when the batch spans at least two groups.
        if len(label_groups) >= 2:
            for key in label_groups:
                n_pos = sum(label_groups[key])
                n_neg = len(label_groups[key]) - n_pos
                # Groups containing a single class have no AUC and are skipped.
                if n_pos > 0 and n_neg > 0:
                    weight_sum += n_pos * n_neg
                    cur_auc = roc_auc_score(y_true=label_groups[key],
                                            y_score=predict_groups[key],
                                            sample_weight=weight_groups[key])
                    # Each group's AUC is weighted by its positive/negative pair count.
                    auc_sum += n_pos * n_neg * cur_auc

        if weight_sum == 0:
            return MetricResult(result=float('nan'))
        else:
            wgauc = auc_sum / weight_sum
            return MetricResult(result=wgauc)
Example #4
    def eval(self, predict, labels_map):
        label = labels_map[self._label_name]
        group_key = labels_map[self._group_key_name]
        predict_groups, label_groups = defaultdict(list), defaultdict(list)
        # Bucket predictions and labels by group key.
        for l, p, key in zip(label.flatten(), predict.flatten(),
                             group_key.flatten()):
            predict_groups[key].append(p)
            label_groups[key].append(l)

        weight_sum, auc_sum = 0, 0
        # The grouped AUC is only computed when the batch spans at least two groups.
        if len(label_groups) >= 2:
            for key in label_groups:
                n_pos = sum(label_groups[key])
                n_neg = len(label_groups[key]) - n_pos
                # Groups containing a single class have no AUC and are skipped.
                if n_pos > 0 and n_neg > 0:
                    weight_sum += n_pos * n_neg
                    cur_auc = roc_auc_score(y_true=label_groups[key],
                                            y_score=predict_groups[key])
                    # Each group's AUC is weighted by its positive/negative pair count.
                    auc_sum += n_pos * n_neg * cur_auc

        if weight_sum == 0:
            return MetricResult(result=float('nan'))
        else:
            gauc = auc_sum / weight_sum
            return MetricResult(result=gauc,
                                meta={
                                    '#': predict.size,
                                    '#pairs': weight_sum
                                })
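
Examples #3 and #4 compute a grouped AUC (GAUC): samples are bucketed by a group key, an AUC is computed per group, and the per-group AUCs are averaged with each group weighted by its positive/negative pair count (n_pos * n_neg). Example #3 additionally passes per-sample weights (floored at 1) into roc_auc_score via sample_weight. A minimal standalone sketch of that aggregation, with an illustrative gauc helper that is not part of the original class (the two-group guard from the originals is omitted for brevity):

    from collections import defaultdict

    import numpy as np
    from sklearn.metrics import roc_auc_score

    # Illustrative helper showing the GAUC aggregation of Example #4.
    def gauc(predict, label, group_key):
        predict_groups, label_groups = defaultdict(list), defaultdict(list)
        for l, p, key in zip(label.flatten(), predict.flatten(), group_key.flatten()):
            predict_groups[key].append(p)
            label_groups[key].append(l)

        weight_sum, auc_sum = 0, 0
        for key in label_groups:
            n_pos = sum(label_groups[key])
            n_neg = len(label_groups[key]) - n_pos
            if n_pos > 0 and n_neg > 0:
                # Weight each group's AUC by its positive/negative pair count.
                pairs = n_pos * n_neg
                weight_sum += pairs
                auc_sum += pairs * roc_auc_score(label_groups[key], predict_groups[key])
        return float('nan') if weight_sum == 0 else auc_sum / weight_sum

    # Toy batch with two groups (e.g. two users):
    predict = np.array([0.9, 0.3, 0.8, 0.2, 0.6, 0.4])
    label = np.array([1, 0, 1, 0, 1, 0])
    group_key = np.array([1, 1, 1, 2, 2, 2])
    print(gauc(predict, label, group_key))
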
Example #5
    def eval(self, predict, labels_map):
        label = labels_map[self._label_name]
        if np.sum(label) == 0 or np.sum(label) == label.size:
            return MetricResult(result=float('nan'))
        else:
            # Only positive samples (label > 0) contribute to this MSE.
            weight = np.where(label > 0, np.ones_like(label), np.zeros_like(label))
            mse = mean_squared_error(y_true=label, y_pred=predict, sample_weight=weight)
            return MetricResult(result=mse, meta={'#': predict.size})
Example #6
    def eval(self, predict, labels_map):
        label = labels_map[self._label_name]
        # Per-sample weights, floored at 1 so every sample contributes.
        weight = np.maximum(labels_map[self._weight_name], 1)
        if np.sum(label) == 0 or np.sum(label) == label.size:
            return MetricResult(result=float('nan'))
        else:
            wauc = roc_auc_score(y_true=label,
                                 y_score=predict,
                                 sample_weight=weight)
            return MetricResult(result=wauc)