Example #1
  def build_output(self, model):  # pylint: disable=no-self-use
    """
    Build the output of the model.
    `score` and `input_y` are for loss calculation.
    `preds` and `y_ground_truth` are for metric calculation.
    """

    transitions = model.transitions
    intent_logits, slots_logits = model.logits
    input_intent_y, input_slots_y = model.input_y

    intent_score = tf.nn.softmax(intent_logits, name="intent_score")
    intent_preds = tf.argmax(intent_logits, axis=-1, name="intent_preds")
    y_intent_ground_truth = tf.argmax(
        input_intent_y, axis=-1, name="y_intent_ground_truth")

    slots_preds, slots_score = crf_decode(slots_logits, transitions,
                                          model.input_x_len)

    slots_preds = tf.identity(slots_preds, name="slots_preds")
    slots_score = tf.identity(slots_score, name="slots_score")
    y_slots_ground_truth = tf.identity(
        input_slots_y, name="y_slots_ground_truth")

    model.preds = intent_preds, slots_preds
    model.score = intent_score, slots_score
    model.y_ground_truth = y_intent_ground_truth, y_slots_ground_truth
    logging.info("Model built.")
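A note on the CRF step above: `crf_decode` is presumably `tf.contrib.crf.crf_decode` (TF 1.x), which returns the best tag sequence and its unnormalized path score. A minimal standalone sketch with made-up shapes:

import tensorflow as tf  # TF 1.x
from tensorflow.contrib.crf import crf_decode

potentials = tf.random.normal([2, 5, 4])        # [batch, max_len, num_tags]
transitions = tf.random.normal([4, 4])          # [num_tags, num_tags]
seq_lens = tf.constant([5, 3], dtype=tf.int32)  # valid length per example

# decode_tags: [2, 5] best tag ids; best_score: [2] path scores
decode_tags, best_score = crf_decode(potentials, transitions, seq_lens)

with tf.Session() as sess:
    tags, scores = sess.run([decode_tags, best_score])
    print(tags.shape, scores.shape)  # (2, 5) (2,)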
Example #2
    def build_output(self, model):  # pylint: disable=no-self-use
        """
    Build the output of the model.
    `score` and `input_y` are for loss calculation.
    `preds` and `y_ground_truth` are for metric calculation.
    """

        model.score = tf.nn.softmax(model.logits, name="score")
        model.preds = tf.argmax(model.logits, axis=-1, name="preds")
        model.y_ground_truth = tf.argmax(model.input_y, axis=-1)
Example #3
    def build_output(self, model):  # pylint: disable=no-self-use
        """
        Build the output of the model.
        `score` and `input_y` are for loss calculation.
        `preds` and `y_ground_truth` are for metric calculation.
        """
        if self.tasktype == "Classification":
            model.score = tf.nn.softmax(model.logits, name="score")
            model.preds = tf.argmax(model.logits, axis=-1)
            model.y_ground_truth = tf.argmax(model.input_y, axis=-1)
        else:
            raise ValueError("%s is not a valid task type. "
                             "Must be one of `Ranking` or `Classification`." %
                             (self.tasktype))
Example #4
    def build_output(self, model):  # pylint: disable=no-self-use
        """
    Build the output of the model.
    `score` and `input_y` are for loss calculation.
    `preds` and `y_ground_truth` are for metric calculation.
    """

        model.score = tf.nn.softmax(model.logits, name="score")
        model.preds = tf.argmax(model.logits, axis=-1)
        if hasattr(model, "input_y"):
            model.y_ground_truth = tf.argmax(model.input_y, axis=-1)
        model.output_dict = {"score": model.score, "preds": model.preds}

        if model.use_pretrained_model:
            self.initialize_pretrained_model_variables(
                model.pretrained_model_path, model.pretrained_model_mode)
Example #5
  def compute_lens(inputs, max_len):
    """count sequence length.
    input: [batch_size, max_len]
    lens: [batch_size]
    """

    # Locate the first non-zero entry from the end: reversing the sequence
    # makes argmax return the distance from the end to the last real token.
    x_binary = tf.cast(tf.cast(tf.reverse(inputs, axis=[1]), tf.bool), tf.int32)
    lens = max_len - tf.argmax(x_binary, axis=1, output_type=tf.int32)

    # All-zero (fully padded) rows would otherwise get length max_len;
    # force them to 0 instead.
    zeros = tf.zeros_like(lens, dtype=tf.int32)
    x_sum = tf.reduce_sum(inputs, axis=1)
    sen_lens = tf.where(tf.equal(x_sum, 0), zeros, lens)
    return sen_lens
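A minimal sanity check for `compute_lens` on a zero-padded batch (a sketch assuming TF 1.x graph mode and that `compute_lens` is reachable as a plain function, though it is written as a method above; the values are illustrative):

import tensorflow as tf  # TF 1.x

ids = tf.constant([[5, 3, 9, 0, 0],
                   [7, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0]], dtype=tf.int32)
lens = compute_lens(ids, max_len=5)

with tf.Session() as sess:
    print(sess.run(lens))  # [3 1 0]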
Example #6
  def get_eval_hooks(self, labels, logits):
    '''labels: [batch]
    logits: [batch, num_classes]
    '''
    eval_hooks = []
    metric_tensor = {}
    with tf.variable_scope('metrics'):
      true_label = labels
      softmax = tf.nn.softmax(logits)
      pred_label = tf.argmax(softmax, -1)
      # Each tf.metrics.* call returns a (value_op, update_op) pair.
      eval_metrics_ops = {
          'accuracy':
              tf.metrics.accuracy(
                  labels=true_label, predictions=pred_label, weights=None),
          # AUC scores the probability of the last class, which assumes
          # a binary task with the positive class in the final column.
          'auc':
              tf.metrics.auc(
                  labels=true_label,
                  predictions=softmax[:, -1],
                  num_thresholds=20,
                  curve='ROC',
                  summation_method='trapezoidal'),
          'precision':
              tf.metrics.precision(
                  labels=true_label, predictions=pred_label, weights=None),
          'recall':
              tf.metrics.recall(
                  labels=true_label, predictions=pred_label, weights=None),
          'tp':
              tf.metrics.true_positives(
                  labels=true_label, predictions=pred_label, weights=None),
          'fn':
              tf.metrics.false_negatives(
                  labels=true_label, predictions=pred_label, weights=None),
          'fp':
              tf.metrics.false_positives(
                  labels=true_label, predictions=pred_label, weights=None),
          'tn':
              tf.metrics.true_negatives(
                  labels=true_label, predictions=pred_label, weights=None),
      }

    # Log only the value op from each (value_op, update_op) pair.
    metric_tensor.update({key: val[0] for key, val in eval_metrics_ops.items()})
    metric_hook = tf.train.LoggingTensorHook(
        tensors=metric_tensor,
        every_n_iter=10000,
        every_n_secs=None,
        at_end=False,
        formatter=None)
    eval_hooks.append(metric_hook)
    return eval_hooks, eval_metrics_ops
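For context, the (value_op, update_op) contract these metrics follow, as a standalone sketch (TF 1.x; toy values):

import tensorflow as tf  # TF 1.x

labels = tf.constant([1, 0, 1, 1])
preds = tf.constant([1, 0, 0, 1])
acc_value, acc_update = tf.metrics.accuracy(labels=labels, predictions=preds)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric accumulators are local variables
    sess.run(acc_update)                        # accumulate one batch
    print(sess.run(acc_value))                  # 0.75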
Example #7
  def build_output(self, model):  # pylint: disable=no-self-use
    """
    Build the output of the model.
    `score` and `input_y` are for loss calculation.
    `preds` and `y_ground_truth` are for metric calculation.
    """
    if model.mode != utils.INFER:
      model.score = tf.nn.softmax(model.logits, name="score")
      model.preds = tf.argmax(model.logits, axis=-1)
      model.output_dict = {"score": model.score, "preds": model.preds}
    else:
      model.preds = model.logits
      model.output_dict = {"preds": model.preds}
    if hasattr(model, "input_y"):
      model.y_ground_truth = model.input_y
Example #8
def accuracy(logits, labels):
    '''Accuracy candies.
    params:
      logits: [B, ..., D]
      labels: [B, ...]
    return:
      accuracy tensor
    '''
    with tf.name_scope('accuracy'):
        assert_rank = tf.assert_equal(tf.rank(logits), tf.rank(labels) + 1)
        assert_shape = tf.assert_equal(tf.shape(logits)[:-1], tf.shape(labels))
        with tf.control_dependencies([assert_rank, assert_shape]):
            predictions = tf.argmax(logits, axis=-1, output_type=tf.int64)
            labels = tf.cast(labels, tf.int64)
            return tf.reduce_mean(
                tf.cast(tf.equal(predictions, labels), dtype=tf.float32))
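A quick sanity check of `accuracy` on toy tensors (a sketch assuming TF 1.x graph mode; the values are illustrative):

import tensorflow as tf  # TF 1.x

logits = tf.constant([[2.0, 1.0], [0.5, 3.0], [4.0, 0.0]])  # argmax: 0, 1, 0
labels = tf.constant([0, 1, 1])

with tf.Session() as sess:
    print(sess.run(accuracy(logits, labels)))  # 0.6666667 (2 of 3 correct)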
Example #9
def eval(model, dataset, cmvn):
  # tfe here refers to tf.contrib.eager (TF 1.x eager-execution metrics).
  avg_loss = tfe.metrics.Mean('loss', dtype=tf.float32)
  accuracy = tfe.metrics.Accuracy('accuracy', dtype=tf.float32)

  for (batch, (feats, texts, labels, filenames,
               clip_ids)) in enumerate(dataset):
    feats = utils.apply_cmvn(feats, cmvn[0], cmvn[1])

    logits = model(feats, training=False)
    avg_loss(utils.losses(logits, labels, is_train=False))
    accuracy(
        tf.argmax(logits, axis=-1, output_type=tf.int64),
        tf.cast(labels, tf.int64))
    print("Eval set: Average loss: %0.4f, Accuracy: %4f%%\n" %
          (avg_loss.result(), 100 * accuracy.result()))

    with tf.contrib.summary.always_record_summaries():
      tf.contrib.summary.scalar('loss', avg_loss.result())
      tf.contrib.summary.scalar('accuracy', accuracy.result())
Example #10
def confusion_matrix(logits, labels, num_class):
    '''Confusion matrix candies.'''
    # Flatten so that sequence outputs ([B, T, D] logits, [B, T] labels)
    # are scored per element; rows are true labels, columns are predictions.
    return tf.confusion_matrix(labels=tf.reshape(labels, [-1]),
                               predictions=tf.reshape(tf.argmax(logits, -1),
                                                      [-1]),
                               num_classes=num_class)
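A small usage sketch for `confusion_matrix` (TF 1.x; the toy values are illustrative):

import tensorflow as tf  # TF 1.x

logits = tf.constant([[3.0, 0.1, 0.2],   # predicted class 0
                      [0.2, 2.5, 0.1],   # predicted class 1
                      [0.3, 0.2, 1.9]])  # predicted class 2
labels = tf.constant([0, 2, 2])

with tf.Session() as sess:
    print(sess.run(confusion_matrix(logits, labels, num_class=3)))
    # [[1 0 0]
    #  [0 0 0]
    #  [0 1 1]]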