예제 #1
0
def _sigmoid_entropy(probabilities, targets, weights=None):
  """Streaming mean of sigmoid cross-entropy vs. one-hot-encoded targets."""
  # One-hot depth is taken from the class dimension of the predictions.
  num_classes = array_ops.shape(probabilities)[1]
  onehot_targets = _squeeze_and_onehot(targets, num_classes)
  entropy = losses.sigmoid_cross_entropy(probabilities, onehot_targets)
  return metric_ops.streaming_mean(entropy, weights=weights)
예제 #2
0
def _sigmoid_entropy(probabilities, targets, weights=None):
  """Return a streaming mean over per-example sigmoid cross-entropy.

  Targets are squeezed and one-hot encoded to match the number of
  classes implied by `probabilities`.
  """
  depth = array_ops.shape(probabilities)[1]
  per_example = losses.sigmoid_cross_entropy(
      probabilities, _squeeze_and_onehot(targets, depth))
  return metric_ops.streaming_mean(per_example, weights=weights)
예제 #3
0
def _r2(probabilities, targets, weights=None):
  """Streaming mean of the R^2 (coefficient of determination) score."""
  # Cast so the arithmetic against float predictions is well-defined.
  targets = math_ops.to_float(targets)
  per_output_mean = math_ops.reduce_mean(targets, 0)
  total_ss = math_ops.reduce_sum(
      math_ops.square(targets - per_output_mean), 0)
  residual_ss = math_ops.reduce_sum(
      math_ops.square(targets - probabilities), 0)
  r2_score = 1 - math_ops.reduce_sum(residual_ss / total_ss)
  return metric_ops.streaming_mean(r2_score, weights=weights)
예제 #4
0
def _r2(probabilities, targets, weights=None):
    """Compute a streaming R^2 score of `probabilities` against `targets`."""
    targets = math_ops.to_float(targets)
    centered = targets - math_ops.reduce_mean(targets, 0)
    ss_tot = math_ops.reduce_sum(math_ops.square(centered), 0)
    ss_res = math_ops.reduce_sum(
        math_ops.square(targets - probabilities), 0)
    # R^2 = 1 - SS_res / SS_tot, summed over output columns.
    score = 1 - math_ops.reduce_sum(ss_res / ss_tot)
    return metric_ops.streaming_mean(score, weights=weights)
예제 #5
0
def _r2(probabilities, targets):
    """Streaming R^2 score; promotes rank-1 targets to a column vector."""
    if targets.get_shape().ndims == 1:
        # Match the 2-D shape of the predictions.
        targets = array_ops.expand_dims(targets, -1)
    mean_targets = math_ops.reduce_mean(targets, 0)
    ss_total = math_ops.reduce_sum(
        math_ops.square(targets - mean_targets), 0)
    ss_resid = math_ops.reduce_sum(
        math_ops.square(targets - probabilities), 0)
    score = 1 - math_ops.reduce_sum(ss_resid / ss_total)
    return metric_ops.streaming_mean(score)
예제 #6
0
    def build_graph(self, data_paths, batch_size, is_training):
        """Builds generic graph for training or eval.

        Args:
          data_paths: input paths forwarded to util.read_examples.
          batch_size: number of examples per input batch.
          is_training: if True, shuffles input, reads indefinitely
            (num_epochs=None) and adds training ops; if False, reads 2
            epochs and only creates a non-trainable global_step.

        Returns:
          A GraphReferences with examples, global_step, metric_updates,
          metric_values (and train when is_training) populated.
        """
        tensors = GraphReferences()

        _, tensors.examples = util.read_examples(
            data_paths,
            batch_size,
            shuffle=is_training,
            num_epochs=None if is_training else 2)

        parsed = parse_examples(tensors.examples)

        # Build a Graph that computes predictions from the inference model.
        logits = inference(parsed['images'], self.hidden1, self.hidden2)

        # Add to the Graph the Ops for loss calculation.
        loss_value = loss(logits, parsed['labels'])

        # Add to the Graph the Ops for accuracy calculation.
        accuracy_value = evaluation(logits, parsed['labels'])

        # Add to the Graph the Ops that calculate and apply gradients.
        if is_training:
            tensors.train, tensors.global_step = training(
                loss_value, self.learning_rate)
        else:
            tensors.global_step = tf.Variable(0,
                                              name='global_step',
                                              trainable=False)

        # Add streaming means.
        # Each streaming_mean returns (value_op, update_op); the update ops
        # must be run to accumulate the averages across batches.
        loss_op, loss_update = metric_ops.streaming_mean(loss_value)
        accuracy_op, accuracy_update = metric_ops.streaming_mean(
            accuracy_value)

        tf.scalar_summary('accuracy', accuracy_op)
        tf.scalar_summary('loss', loss_op)

        # HYPERPARAMETER TUNING: Write the objective value.
        if not is_training:
            tf.scalar_summary('training/hptuning/metric', accuracy_op)

        tensors.metric_updates = [loss_update, accuracy_update]
        tensors.metric_values = [loss_op, accuracy_op]
        return tensors
예제 #7
0
 def get_eval_ops(self, features, logits, labels, metrics=None):
     """Build eval ops: a streaming loss plus any requested metrics."""
     result = {
         "loss": metric_ops.streaming_mean(
             self.loss(logits, labels, features))
     }
     if metrics:
         class_predictions = self.logits_to_predictions(logits, proba=False)
         weight = self.get_weight_tensor(features)
         result.update(
             _run_metrics(class_predictions, labels, metrics, weight))
     return result
예제 #8
0
def _r2(probabilities, targets):
    """Streaming mean of the coefficient of determination (R^2)."""
    # Ensure targets are 2-D so per-column reductions line up.
    if targets.get_shape().ndims == 1:
        targets = array_ops.expand_dims(targets, -1)
    col_means = math_ops.reduce_mean(targets, 0)
    total = math_ops.reduce_sum(math_ops.square(targets - col_means), 0)
    resid = math_ops.reduce_sum(
        math_ops.square(targets - probabilities), 0)
    return metric_ops.streaming_mean(1 - math_ops.reduce_sum(resid / total))
예제 #9
0
 def get_eval_ops(self, features, logits, labels, metrics=None):
   """Return a dict of eval ops keyed by metric name.

   Always includes a streaming "loss"; optional `metrics` are run
   against class predictions with per-example weights.
   """
   result = {
       "loss": metric_ops.streaming_mean(
           self.loss(logits, labels, features))
   }
   if metrics:
     result.update(
         _run_metrics(
             self.logits_to_predictions(logits, proba=False),
             labels, metrics, self.get_weight_tensor(features)))
   return result
예제 #10
0
File: model.py  Project: obulpathi/cloud
  def build_graph(self, data_paths, batch_size, is_training):
    """Builds generic graph for training or eval.

    Args:
      data_paths: input paths forwarded to util.read_examples.
      batch_size: number of examples per input batch.
      is_training: if True, shuffles input, reads indefinitely
        (num_epochs=None) and adds training ops; if False, reads 2 epochs
        and only creates a non-trainable global_step.

    Returns:
      A GraphReferences with examples, global_step, metric_updates,
      metric_values (and train when is_training) populated.
    """
    tensors = GraphReferences()

    _, tensors.examples = util.read_examples(
        data_paths,
        batch_size,
        shuffle=is_training,
        num_epochs=None if is_training else 2)

    parsed = parse_examples(tensors.examples)

    # Build a Graph that computes predictions from the inference model.
    logits = inference(parsed['images'], self.hidden1, self.hidden2)

    # Add to the Graph the Ops for loss calculation.
    loss_value = loss(logits, parsed['labels'])

    # Add to the Graph the Ops for accuracy calculation.
    accuracy_value = evaluation(logits, parsed['labels'])

    # Add to the Graph the Ops that calculate and apply gradients.
    if is_training:
      tensors.train, tensors.global_step = training(loss_value,
                                                    self.learning_rate)
    else:
      tensors.global_step = tf.Variable(0, name='global_step', trainable=False)

    # Add streaming means.
    # streaming_mean returns (value_op, update_op); update ops must run to
    # accumulate the averages across batches.
    loss_op, loss_update = metric_ops.streaming_mean(loss_value)
    accuracy_op, accuracy_update = metric_ops.streaming_mean(accuracy_value)

    tf.scalar_summary('accuracy', accuracy_op)
    tf.scalar_summary('loss', loss_op)

    # HYPERPARAMETER TUNING: Write the objective value.
    if not is_training:
      tf.scalar_summary('training/hptuning/metric', accuracy_op)

    tensors.metric_updates = [loss_update, accuracy_update]
    tensors.metric_values = [loss_op, accuracy_op]
    return tensors
예제 #11
0
    def get_eval_ops(self, features, logits, labels, metrics=None):
        """Builds the dict of eval ops for this head.

        Always includes a streaming "loss" metric and the head's default
        metrics computed on sigmoid(logits). User metrics in `metrics` may
        be keyed either by a plain string (run on class predictions) or by
        a 2-tuple (name, "classes"|"probabilities") selecting which kind of
        prediction the metric receives.

        Raises:
          ValueError: if a metric key is a tuple of the wrong length, names
            an unknown prediction kind, or is neither str nor tuple.
        """
        loss = self.loss(logits, labels, features)
        result = {"loss": metric_ops.streaming_mean(loss)}

        # Adds default metrics.
        if metrics is None:
            # TODO(b/29366811): This currently results in both an "accuracy" and an
            # "accuracy/threshold_0.500000_mean" metric for binary classification.
            metrics = {("accuracy", "classes"): metric_ops.streaming_accuracy}

        predictions = math_ops.sigmoid(logits)
        labels_float = math_ops.to_float(labels)

        default_metrics = self._default_eval_metrics()
        for metric_name, metric_op in default_metrics.items():
            result[metric_name] = metric_op(predictions, labels_float)

        # Partition user metrics by which prediction kind they consume.
        class_metrics = {}
        proba_metrics = {}
        for name, metric_op in six.iteritems(metrics):
            if isinstance(name, tuple):
                if len(name) != 2:
                    raise ValueError(
                        "Ignoring metric {}. It returned a tuple with "
                        "len {}, expected 2.".format(name, len(name)))
                else:
                    if name[1] not in ["classes", "probabilities"]:
                        raise ValueError(
                            "Ignoring metric {}. The 2nd element of its "
                            "name should be either 'classes' or "
                            "'probabilities'.".format(name))
                    elif name[1] == "classes":
                        class_metrics[name[0]] = metric_op
                    else:
                        proba_metrics[name[0]] = metric_op
            elif isinstance(name, str):
                # Bare string keys default to class predictions.
                class_metrics[name] = metric_op
            else:
                raise ValueError(
                    "Ignoring metric {}. Its name is not in the correct "
                    "form.".format(name))
        if class_metrics:
            class_predictions = self.logits_to_predictions(logits, proba=False)
            result.update(
                _run_metrics(class_predictions, labels, class_metrics,
                             self.get_weight_tensor(features)))
        if proba_metrics:
            predictions = self.logits_to_predictions(logits, proba=True)
            result.update(
                _run_metrics(predictions, labels, proba_metrics,
                             self.get_weight_tensor(features)))
        return result
예제 #12
0
  def get_eval_ops(self, features, logits, labels, metrics=None):
    """Builds the dict of eval ops for this head.

    Always includes a streaming "loss" metric and the head's default
    metrics computed on sigmoid(logits). User metrics in `metrics` may be
    keyed either by a plain string (run on class predictions) or by a
    2-tuple (name, "classes"|"probabilities") selecting which kind of
    prediction the metric receives.

    Raises:
      ValueError: if a metric key is a tuple of the wrong length, names an
        unknown prediction kind, or is neither str nor tuple.
    """
    loss = self.loss(logits, labels, features)
    result = {"loss": metric_ops.streaming_mean(loss)}

    # Adds default metrics.
    if metrics is None:
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics = {("accuracy", "classes"): metric_ops.streaming_accuracy}

    predictions = math_ops.sigmoid(logits)
    labels_float = math_ops.cast(labels, dtypes.float32)

    default_metrics = self._default_eval_metrics()
    for metric_name, metric_op in default_metrics.items():
      result[metric_name] = metric_op(predictions, labels_float)

    # Partition user metrics by which prediction kind they consume.
    class_metrics = {}
    proba_metrics = {}
    for name, metric_op in six.iteritems(metrics):
      if isinstance(name, tuple):
        if len(name) != 2:
          raise ValueError("Ignoring metric {}. It returned a tuple with "
                           "len {}, expected 2.".format(name, len(name)))
        else:
          if name[1] not in ["classes", "probabilities"]:
            raise ValueError("Ignoring metric {}. The 2nd element of its "
                             "name should be either 'classes' or "
                             "'probabilities'.".format(name))
          elif name[1] == "classes":
            class_metrics[name[0]] = metric_op
          else:
            proba_metrics[name[0]] = metric_op
      elif isinstance(name, str):
        # Bare string keys default to class predictions.
        class_metrics[name] = metric_op
      else:
        raise ValueError("Ignoring metric {}. Its name is not in the correct "
                         "form.".format(name))
    if class_metrics:
      class_predictions = self.logits_to_predictions(logits, proba=False)
      result.update(
          _run_metrics(class_predictions, labels, class_metrics,
                       self.get_weight_tensor(features)))
    if proba_metrics:
      predictions = self.logits_to_predictions(logits, proba=True)
      result.update(
          _run_metrics(predictions, labels, proba_metrics,
                       self.get_weight_tensor(features)))
    return result
예제 #13
0
def _sigmoid_entropy(probabilities, targets):
    """Streaming mean of sigmoid cross-entropy between probabilities and targets."""
    entropy = losses.sigmoid_cross_entropy(probabilities, targets)
    return metric_ops.streaming_mean(entropy)
예제 #14
0
def _softmax_entropy(probabilities, targets):
    """Streaming mean of softmax cross-entropy between probabilities and targets."""
    entropy = losses.softmax_cross_entropy(probabilities, targets)
    return metric_ops.streaming_mean(entropy)
예제 #15
0
def _predictions_streaming_mean(predictions, unused_labels, weights=None):
    """Streaming mean of the predictions themselves.

    The labels argument exists only to satisfy the shared metric
    signature; it is not used.
    """
    return metric_ops.streaming_mean(predictions, weights=weights)
예제 #16
0
 def _top_k(probabilities, targets):
     """Streaming mean top-k accuracy; `k` is captured from the enclosing scope."""
     hits = nn.in_top_k(probabilities, math_ops.to_int32(targets), k)
     return metric_ops.streaming_mean(hits)
예제 #17
0
def _softmax_entropy(probabilities, targets):
  """Return the streaming mean of softmax cross-entropy."""
  per_example = losses.softmax_cross_entropy(probabilities, targets)
  return metric_ops.streaming_mean(per_example)
예제 #18
0
def _sigmoid_entropy(probabilities, targets):
  """Return the streaming mean of sigmoid cross-entropy."""
  per_example = losses.sigmoid_cross_entropy(probabilities, targets)
  return metric_ops.streaming_mean(per_example)
예제 #19
0
def _predictions_streaming_mean(predictions, unused_labels, weights=None):
  """Streaming mean of the predictions; labels are accepted but ignored."""
  return metric_ops.streaming_mean(predictions, weights=weights)
예제 #20
0
def metrics_wr(values, omit):
    """Streaming mean of `values` weighted by a scalar unit weight.

    `omit` is unused; it exists only to match the expected signature.
    """
    unit_weight = tf.ones(shape=())
    return metric_ops.streaming_mean(values, unit_weight)
예제 #21
0
def _log_loss(probabilities, targets):
    """Streaming mean of log loss between probabilities and targets."""
    # targets doesn't have a shape coming in; log_loss isn't too happy about
    # it, so reshape to match the predictions.
    reshaped_targets = array_ops.reshape(
        targets, array_ops.shape(probabilities))
    return metric_ops.streaming_mean(
        losses.log_loss(probabilities, reshaped_targets))
예제 #22
0
 def _top_k(probabilities, targets):
     """Streaming mean top-k accuracy, squeezing rank-2 targets to rank 1."""
     targets = math_ops.to_int32(targets)
     if targets.get_shape().ndims > 1:
         # in_top_k expects 1-D integer labels.
         targets = array_ops.squeeze(targets, squeeze_dims=[1])
     in_top = nn.in_top_k(probabilities, targets, k)
     return metric_ops.streaming_mean(in_top)
예제 #23
0
 def _top_k(probabilities, targets):
   """Streaming mean of whether each target is in the top-k predictions."""
   labels = math_ops.to_int32(targets)
   if labels.get_shape().ndims > 1:
     # Flatten a (batch, 1) label tensor to (batch,) for in_top_k.
     labels = array_ops.squeeze(labels, squeeze_dims=[1])
   return metric_ops.streaming_mean(nn.in_top_k(probabilities, labels, k))
예제 #24
0
def _softmax_entropy(probabilities, targets, weights=None):
    """Streaming mean of sparse softmax cross-entropy over integer targets."""
    entropy = losses.sparse_softmax_cross_entropy(
        probabilities, math_ops.to_int32(targets))
    return metric_ops.streaming_mean(entropy, weights=weights)
예제 #25
0
def _log_loss(probabilities, targets):
  """Streaming mean of log loss between probabilities and targets."""
  # targets doesn't have a shape coming in; log_loss isn't too happy about
  # it, so force it into the shape of the predictions first.
  shaped = array_ops.reshape(targets, array_ops.shape(probabilities))
  return metric_ops.streaming_mean(losses.log_loss(probabilities, shaped))
예제 #26
0
def _softmax_entropy(probabilities, targets, weights=None):
  """Return the streaming mean of sparse softmax cross-entropy."""
  int_targets = math_ops.to_int32(targets)
  entropy = losses.sparse_softmax_cross_entropy(probabilities, int_targets)
  return metric_ops.streaming_mean(entropy, weights=weights)
예제 #27
0
 def _top_k(probabilities, targets):
   """Streaming mean top-k accuracy using the closed-over `k`."""
   hits = nn.in_top_k(probabilities, math_ops.to_int32(targets), k)
   return metric_ops.streaming_mean(hits)
예제 #28
0
def _class_log_loss(probabilities, targets, weights=None):
    """Streaming mean of log loss against one-hot-encoded targets."""
    num_classes = array_ops.shape(probabilities)[1]
    onehot = _squeeze_and_onehot(targets, num_classes)
    return metric_ops.streaming_mean(
        losses.log_loss(probabilities, onehot), weights=weights)
예제 #29
0
def _class_log_loss(probabilities, targets, weights=None):
  """Return the streaming mean of log loss vs. one-hot targets."""
  depth = array_ops.shape(probabilities)[1]
  per_example = losses.log_loss(
      probabilities, _squeeze_and_onehot(targets, depth))
  return metric_ops.streaming_mean(per_example, weights=weights)