def build_eval_metrics(self, logits, labels):
    """ Build evaluation metrics for the evaluation phase.

    Args:
        logits (`list` of `Tensor`): list of tensors, each of shape [None, num_labels]
        labels (`Tensor`): shape [None]
    Returns:
        ret_dict (`dict`): a dict of per-layer accuracy tf.metrics ops
    """
    if self.config.train_probes:
        return teacher_probes_eval_metrics(logits, labels,
                                           self.config.num_labels)
    else:
        return classification_eval_metrics(logits[0], labels,
                                           self.config.num_labels)
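The probe branch's helper is not shown in this example. Below is a minimal sketch of what a per-layer accuracy dict could look like, assuming `logits` is a list of per-layer [None, num_labels] tensors as the docstring states; `teacher_probes_eval_metrics_sketch` is a hypothetical stand-in, not the library's actual implementation.

import tensorflow as tf

def teacher_probes_eval_metrics_sketch(logits_list, labels, num_labels):
    """Hypothetical stand-in: one tf.metrics.accuracy op per probe layer."""
    # num_labels is kept only for signature parity with the call above
    ret_dict = {}
    for i, layer_logits in enumerate(logits_list):
        predictions = tf.argmax(layer_logits, axis=-1, output_type=tf.int32)
        ret_dict["probe_layer_%d_accuracy" % i] = tf.metrics.accuracy(
            labels=labels, predictions=predictions)
    return ret_dict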
Example #2

def build_eval_metrics(self, logits, labels):
    """ Build evaluation metrics for the evaluation phase.

    Args:
        logits (`Tensor`): shape of [None, num_labels]
        labels (`Tensor`): shape [None]
    Returns:
        ret_dict (`dict`): a dict of (`py_accuracy`, `py_micro_f1`, `py_macro_f1`) tf.metrics ops
    """
    if hasattr(self.config, "multi_label") and self.config.multi_label:
        return multi_label_eval_metrics(logits, labels, self.config.num_labels)
    elif self.config.num_labels == 1:
        return regression_eval_metrics(logits, labels)
    else:
        return classification_eval_metrics(logits, labels, self.config.num_labels)
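`classification_eval_metrics` is referenced but not defined here. The following is a hedged sketch of a helper that returns the `py_accuracy` / `py_micro_f1` / `py_macro_f1` trio named in the docstring, computing F1 per evaluation batch with sklearn inside `tf.py_func` and averaging it with `tf.metrics.mean`; this is an assumption about the implementation, not the library's actual code.

import numpy as np
import tensorflow as tf
from sklearn.metrics import f1_score

def classification_eval_metrics_sketch(logits, labels, num_labels):
    """Hypothetical stand-in for the classification metric dict."""
    predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)

    def _batch_f1(preds, golds, average):
        # sklearn expects y_true first; fixing the label set keeps macro F1
        # consistent when a batch is missing some classes
        return np.float32(f1_score(golds, preds,
                                   labels=list(range(num_labels)),
                                   average=average))

    micro_f1 = tf.py_func(lambda p, g: _batch_f1(p, g, "micro"),
                          [predictions, labels], tf.float32)
    macro_f1 = tf.py_func(lambda p, g: _batch_f1(p, g, "macro"),
                          [predictions, labels], tf.float32)
    return {
        "py_accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions),
        "py_micro_f1": tf.metrics.mean(micro_f1),
        "py_macro_f1": tf.metrics.mean(macro_f1),
    }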
Example #3
def build_eval_metrics(self, logits, labels):
    return classification_eval_metrics(logits, labels, self.num_labels)
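The dicts returned by build_eval_metrics are in the (value_op, update_op) format that a TF 1.x Estimator expects for `eval_metric_ops`. Below is a small self-contained sketch, with toy tensors assumed, of how such a dict is driven during evaluation.

import tensorflow as tf

# Toy batch: predictions come from argmax over logits, as in the examples above.
logits = tf.constant([[2.0, 0.1], [0.3, 1.5], [0.2, 0.9]])
labels = tf.constant([0, 1, 0])
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
metrics = {"py_accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions)}

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())   # tf.metrics state lives in local variables
    for _, update_op in metrics.values():
        sess.run(update_op)                       # accumulate one evaluation batch
    print(sess.run({name: value for name, (value, _) in metrics.items()}))
    # {'py_accuracy': 0.6666667}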
Example #4
def build_eval_metrics(self, logits, labels):
    # CoLA is scored with the Matthews correlation coefficient;
    # the remaining tasks use standard classification metrics.
    if _APP_FLAGS.task_name == "CoLA":
        return matthew_corr_metrics(logits, labels)
    else:
        return classification_eval_metrics(logits, labels, self.num_labels)
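`matthew_corr_metrics` is not shown either. Below is a speculative sketch of a streaming Matthews correlation coefficient for CoLA, built from tf.metrics confusion-matrix counts; the actual helper may compute this differently.

import tensorflow as tf

def matthew_corr_metrics_sketch(logits, labels):
    """Hypothetical stand-in: streaming MCC for binary (CoLA-style) labels."""
    predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
    tp, tp_op = tf.metrics.true_positives(labels, predictions)
    tn, tn_op = tf.metrics.true_negatives(labels, predictions)
    fp, fp_op = tf.metrics.false_positives(labels, predictions)
    fn, fn_op = tf.metrics.false_negatives(labels, predictions)

    numerator = tp * tn - fp * fn
    denominator = tf.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) + 1e-8  # avoid 0/0
    mcc = numerator / denominator
    return {"matthew_corr": (mcc, tf.group(tp_op, tn_op, fp_op, fn_op))}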