Example #1
0
 def evaluate(self, ground_truth, result):
     """Return the multi-class log loss of *result* against *ground_truth*.

     NOTE(review): the original docstring claimed "AUC metric", but the code
     computes cross-entropy via ``log_loss`` — docstring corrected.

     Args:
         ground_truth: raw labels accepted by ``labels_to_binary``.
         result: DataFrame-like of per-class predicted probabilities, one
             column per class.

     Returns:
         float: log loss over all classes.
     """
     # One-hot encode the ground truth, then stack its columns in the same
     # order as the prediction columns so rows/columns line up.
     binary_labels = labels_to_binary(ground_truth)
     binary_labels = np.hstack([
         binary_labels[column].values.reshape(-1, 1)
         for column in result.columns
     ])
     predicted_labels = np.hstack([
         result[column].values.reshape(-1, 1) for column in result.columns
     ])
     return log_loss(binary_labels, predicted_labels)
Example #2
0
 def evaluate(self, ground_truth, result):
     """Return the macro-averaged ROC AUC of *result* vs. *ground_truth*.

     Args:
         ground_truth: raw labels accepted by ``labels_to_binary``.
         result: DataFrame-like of per-class predicted probabilities, one
             column per class.

     Returns:
         float: unweighted mean of the per-class AUC scores.
     """
     # One-hot encode the ground truth, stacking its columns in the same
     # order as the prediction columns so rows/columns line up.
     # (Removed dead local ``classwise_auc = {}`` — assigned, never used.)
     binary_labels = labels_to_binary(ground_truth)
     binary_labels = np.hstack([
         binary_labels[column].values.reshape(-1, 1)
         for column in result.columns
     ])
     predicted_labels = np.hstack([
         result[column].values.reshape(-1, 1) for column in result.columns
     ])
     return roc_auc_score(binary_labels, predicted_labels, average='macro')
Example #3
0
 def evaluate(self, ground_truth, result):
     """Return the macro-averaged ROC AUC, tolerating class-set mismatch.

     Unlike the simple variant, this aligns the label and prediction
     matrices over the union of classes seen in the ground truth and the
     prediction columns, filling any missing column with zeros.

     Args:
         ground_truth: iterable of pairs; the label is taken from index 1
             of each element (presumably ``(id, label)`` tuples — verify
             against caller).
         result: DataFrame-like of per-class predicted probabilities, one
             column per class.

     Returns:
         float: unweighted mean of the per-class AUC scores.
     """
     ground_truth = [yi[1] for yi in ground_truth]
     # Union of classes so a class present on only one side still gets a
     # column on both sides.
     classes = list(set(ground_truth) | set(result.columns))
     binary_labels = labels_to_binary(ground_truth)
     # Zero column used wherever a class is absent from one side.
     # (Removed dead local ``classwise_auc = {}`` — assigned, never used.)
     default = np.zeros(shape=[len(ground_truth), 1], dtype=np.int32)
     binary_labels = np.hstack([
         binary_labels[column].values.reshape(-1, 1)
         if column in binary_labels else default for column in classes
     ])
     predicted_labels = np.hstack([
         result[column].values.reshape(-1, 1)
         if column in result.columns else default for column in classes
     ])
     return roc_auc_score(binary_labels, predicted_labels, average='macro')