Example #1
 def step_fn(inputs, targets):
   with backprop.GradientTape() as tape:
     outputs = bn.apply(inputs, training=True)
     loss = losses.mean_squared_error(targets, outputs)
   grads = tape.gradient(loss, bn.variables)
   optimizer.apply_gradients(zip(grads, bn.variables))
   return loss
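Example #1 drives TensorFlow-internal modules (`backprop`, `losses`) directly. The same train-step pattern can be expressed with public TF 2.x APIs; a minimal runnable sketch, where the layer configuration and learning rate are illustrative assumptions, not values from the example:

import tensorflow as tf

bn = tf.keras.layers.BatchNormalization()
optimizer = tf.keras.optimizers.SGD(0.01)
mse = tf.keras.losses.MeanSquaredError()

def step_fn(inputs, targets):
  # Record the forward pass so gradients can be taken.
  with tf.GradientTape() as tape:
    outputs = bn(inputs, training=True)
    loss = mse(targets, outputs)
  # Differentiate w.r.t. the layer's variables and apply the update.
  grads = tape.gradient(loss, bn.trainable_variables)
  optimizer.apply_gradients(zip(grads, bn.trainable_variables))
  return loss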
Example #2
    def _model_fn(features, labels, mode):
      predictions = layers.dense(
          features['x'], 1, kernel_initializer=init_ops.zeros_initializer())
      export_outputs = {
          'predictions': export_output.RegressionOutput(predictions)
      }

      if mode == model_fn_lib.ModeKeys.PREDICT:
        return model_fn_lib.EstimatorSpec(
            mode, predictions=predictions, export_outputs=export_outputs)

      loss = losses.mean_squared_error(labels, predictions)
      train_op = training.GradientDescentOptimizer(learning_rate=0.5).minimize(
          loss, training.get_global_step())
      eval_metric_ops = {
          'absolute_error': metrics_lib.mean_absolute_error(
              labels, predictions)
      }

      return model_fn_lib.EstimatorSpec(
          mode,
          predictions=predictions,
          loss=loss,
          train_op=train_op,
          eval_metric_ops=eval_metric_ops,
          export_outputs=export_outputs)
Example #3
 def step_fn(inputs, targets):
     with backprop.GradientTape() as tape:
         outputs = bn.apply(inputs, training=True)
         loss = losses.mean_squared_error(targets, outputs)
     grads = tape.gradient(loss, bn.variables)
     optimizer.apply_gradients(zip(grads, bn.variables))
     return loss
Example #4
    def _model_fn_without_eval_metrics(self, features, labels, mode, params):
        del params  # unused.
        predictions = layers.dense(
            features['x'], 1, kernel_initializer=init_ops.zeros_initializer())
        loss = losses.mean_squared_error(labels, predictions)

        return self._create_head(mode, loss, None)
Example #5
    def _model_fn(features, labels, mode):
      predictions = layers.dense(
          features['x'], 1, kernel_initializer=init_ops.zeros_initializer())
      export_outputs = {
          'predictions': export.RegressionOutput(predictions)
      }

      if mode == model_fn_lib.ModeKeys.PREDICT:
        return model_fn_lib.EstimatorSpec(
            mode, predictions=predictions, export_outputs=export_outputs)

      loss = losses.mean_squared_error(labels, predictions)
      train_op = training.GradientDescentOptimizer(learning_rate=0.5).minimize(
          loss, training.get_global_step())
      eval_metric_ops = {
          'absolute_error': metrics_lib.mean_absolute_error(
              labels, predictions)
      }

      return model_fn_lib.EstimatorSpec(
          mode,
          predictions=predictions,
          loss=loss,
          train_op=train_op,
          eval_metric_ops=eval_metric_ops,
          export_outputs=export_outputs)
Example #6
def mean_squared_error_regressor(tensor_in, labels, weights, biases, name=None):
  """Returns prediction and loss for mean squared error regression."""
  with ops.name_scope(name, 'mean_squared_error_regressor',
                      [tensor_in, labels]):
    predictions = nn.xw_plus_b(tensor_in, weights, biases)
    if len(labels.get_shape()) == 1 and len(predictions.get_shape()) == 2:
      predictions = array_ops_.squeeze(predictions, squeeze_dims=[1])
    return predictions, losses.mean_squared_error(labels, predictions)
Example #7
def mean_squared_error_regressor(tensor_in, labels, weights, biases, name=None):
  """Returns prediction and loss for mean squared error regression."""
  with ops.name_scope(name, 'mean_squared_error_regressor',
                      [tensor_in, labels]):
    predictions = nn.xw_plus_b(tensor_in, weights, biases)
    if len(labels.get_shape()) == 1 and len(predictions.get_shape()) == 2:
      predictions = array_ops_.squeeze(predictions, squeeze_dims=[1])
    return predictions, losses.mean_squared_error(labels, predictions)
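Examples #6 and #7 pair a linear prediction (`xw_plus_b`) with its MSE loss, squeezing the prediction when the labels are rank 1. A minimal sketch of the same idea using only public TF 1.x-style APIs via `tf.compat.v1`; the feature dimension of 3 is an assumption:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, [None, 3])
y = tf.placeholder(tf.float32, [None])
w = tf.get_variable('w', [3, 1], initializer=tf.zeros_initializer())
b = tf.get_variable('b', [1], initializer=tf.zeros_initializer())
# xw_plus_b yields shape [None, 1]; squeeze to match the rank-1 labels,
# exactly as the helper above does.
pred = tf.squeeze(tf.nn.xw_plus_b(x, w, b), axis=[1])
loss = tf.losses.mean_squared_error(y, pred)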
Example #8
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        # Predict.
        with ops.name_scope('head'):
            logits = _check_logits(logits, self._logits_dimension)
            predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
            if mode == model_fn.ModeKeys.PREDICT:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        '': export_output.RegressionOutput(value=logits)
                    })

            # Eval.
            labels = _check_labels(
                _maybe_expand_dim(math_ops.to_float(labels)),
                self._logits_dimension)
            unweighted_loss = losses.mean_squared_error(
                labels=labels,
                predictions=logits,
                reduction=losses.Reduction.NONE)
            weights = _weights(features, self._weight_column)
            training_loss = losses.compute_weighted_loss(
                unweighted_loss,
                weights=weights,
                reduction=losses.Reduction.SUM)
            if mode == model_fn.ModeKeys.EVAL:
                # Estimator already adds a metric for loss.
                eval_metric_ops = {
                    metric_keys.MetricKeys.LOSS_MEAN:
                    metrics_lib.mean(unweighted_loss, weights=weights)
                }
                return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.EVAL,
                                              predictions=predictions,
                                              loss=training_loss,
                                              eval_metric_ops=eval_metric_ops)

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn can not be None.')
        with ops.name_scope(''):
            summary.scalar(metric_keys.MetricKeys.LOSS, training_loss)
            summary.scalar(
                metric_keys.MetricKeys.LOSS_MEAN,
                losses.compute_weighted_loss(unweighted_loss,
                                             weights=weights,
                                             reduction=losses.Reduction.MEAN))
        return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                      predictions=predictions,
                                      loss=training_loss,
                                      train_op=train_op_fn(training_loss))
Example #9
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode, features  # Unused for this head.
   labels = _check_and_reshape_dense_labels(labels, self._logits_dimension)
   labels = math_ops.to_float(labels)
   return LossAndLabels(
       unweighted_loss=losses.mean_squared_error(
           labels=labels, predictions=logits, reduction=losses.Reduction.NONE),
       processed_labels=labels)
Example #10
 def model(scale, y, label):
     with variable_scope.variable_scope("vs", use_resource=True):
         x = variable_scope.get_variable(
             "x",
             shape=[SIZE],
             initializer=init_ops.ones_initializer(),
             dtype=np.float32)
     z = math_ops.reduce_mean(scaled_add_op(x, scale, y), axis=1)
     loss = losses.mean_squared_error(label, z)
     return loss, gradient_descent.GradientDescentOptimizer(
         0.01).minimize(loss)
Example #11
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        None,
        default_name='regression_head',
        values=(tuple(six.itervalues(features)) + (labels, logits))):

      # Predict.
      logits = _check_logits(logits, self._logits_dimension)
      predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
      if mode == model_fn.ModeKeys.PREDICT:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={'': export_output.RegressionOutput(value=logits)})

      # Eval.
      labels = _check_labels(_maybe_expand_dim(math_ops.to_float(labels)),
                             self._logits_dimension)
      unweighted_loss = losses.mean_squared_error(
          labels=labels, predictions=logits, reduction=losses.Reduction.NONE)
      weights = (
          1. if (self._weight_feature_key is None) else
          features[self._weight_feature_key])
      weights = _maybe_expand_dim(math_ops.to_float(weights, name='weights'))
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        # Estimator already adds a metric for loss.
        eval_metric_ops = {
            metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(
                unweighted_loss, weights=weights)
        }
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=eval_metric_ops)

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
      logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS, training_loss)
      logging_ops.scalar_summary(
          metric_keys.MetricKeys.LOSS_MEAN,
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=training_loss,
          train_op=train_op_fn(training_loss))
Example #12
    def _model_fn_with_eval_dict(self, features, labels, mode, params):
        del params  # unused.
        predictions = layers.dense(
            features['x'], 1, kernel_initializer=init_ops.zeros_initializer())
        loss = losses.mean_squared_error(labels, predictions)

        return self._create_head(mode,
                                 loss,
                                 eval_metrics=(self._metric_fn_on_cpu, {
                                     'labels': labels,
                                     'predictions': predictions
                                 }))
Example #13
        def model_fn(features, labels, mode, params):
            del params
            x = core_layers.dense(features, 100)
            x = core_layers.dense(x, 100)
            x = core_layers.dense(x, 100)
            x = core_layers.dense(x, 100)
            y = core_layers.dense(x, 1)
            loss = losses.mean_squared_error(labels, y)
            opt = adam.AdamOptimizer(learning_rate=0.1)
            train_op = opt.minimize(
                loss, global_step=training_util.get_or_create_global_step())

            return EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
Example #14
      def step_fn(is_training, inputs, targets=None):
        bn = normalization.BatchNormalization(
            axis=3, epsilon=1e-3, momentum=0.9, fused=fused)
        bn_list.append(bn)
        outputs = bn.apply(inputs, training=is_training)
        if not is_training:
          return outputs

        loss = losses.mean_squared_error(targets, outputs)
        optimizer = gradient_descent.GradientDescentOptimizer(0.01)
        train_op = optimizer.minimize(loss)
        with ops.control_dependencies([train_op]):
          return array_ops.identity(loss)
Example #15
 def _unweighted_loss_and_weights(self, logits, labels, features):
   """Computes loss spec."""
   if self._loss_fn:
     unweighted_loss = base_head.call_loss_fn(
         loss_fn=self._loss_fn, labels=labels, logits=logits,
         features=features, expected_loss_dim=self._logits_dimension)
   else:
     unweighted_loss = losses.mean_squared_error(
         labels=labels, predictions=logits, reduction=losses.Reduction.NONE)
   weights = base_head.get_weights_and_check_match_logits(
       features=features, weight_column=self._weight_column, logits=logits,
       allow_per_logit_weights=True)
   return unweighted_loss, weights
Example #16
    def model_fn(features, labels, mode, params):
        loss = None
        train_op = None
        export_outputs = None

        # This could be some pre-processing on CPU, such as calls to the input
        # layer with embedding columns.
        x2 = features['x'] * 2

        def computation(input_tensor):
            return layers.dense(
                input_tensor,
                1,
                kernel_initializer=init_ops.zeros_initializer())

        if mode != _PREDICT:
            predictions = computation(x2)
            loss = losses.mean_squared_error(labels, predictions)
            optimizer = tf.tpu.CrossShardOptimizer(
                training.GradientDescentOptimizer(learning_rate=0.5))
            train_op = optimizer.minimize(loss, training.get_global_step())
        else:
            inputs = [x2]
            if params['use_tpu']:
                predictions = array_ops.identity(
                    tpu_estimator.inference_on_tpu(computation,
                                                   inputs,
                                                   num_batch_threads=1,
                                                   max_batch_size=2,
                                                   batch_timeout_micros=100),
                    name='predictions')
            else:
                predictions = array_ops.identity(computation(*inputs),
                                                 name='predictions')
            key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
            export_outputs = {
                key: export_lib.PredictOutput({'prediction': predictions})
            }

            classes = string_ops.as_string(predictions, name='classes')
            classification_output = export_lib.ClassificationOutput(
                classes=classes)
            export_outputs['classification'] = classification_output

        return tpu_estimator.TPUEstimatorSpec(
            mode,
            loss=loss,
            train_op=train_op,
            predictions={'predictions': predictions},
            export_outputs=export_outputs)
Example #17
            def step_fn(is_training, inputs, targets=None):
                bn = normalization.BatchNormalization(axis=3,
                                                      epsilon=1e-3,
                                                      momentum=0.9,
                                                      fused=fused)
                bn_list.append(bn)
                outputs = bn.apply(inputs, training=is_training)
                if not is_training:
                    return outputs

                loss = losses.mean_squared_error(targets, outputs)
                optimizer = gradient_descent.GradientDescentOptimizer(0.01)
                train_op = optimizer.minimize(loss)
                with ops.control_dependencies([train_op]):
                    return array_ops.identity(loss)
Example #18
        def my_model_fn(features, labels, mode):
            self.assertEqual(model_fn_lib.ModeKeys.TRAIN, mode)

            with variable_scope.variable_scope("vs", use_resource=True):
                predictions = layers.Dense(units=1)(features)

            loss = losses.mean_squared_error(labels=labels,
                                             predictions=predictions)
            sharded_optimizer_obj = sharded_optimizer.ShardedOptimizer(
                gradient_descent.GradientDescentOptimizer(0.1))
            train_op = sharded_optimizer_obj.minimize(loss)

            return model_fn_lib.EstimatorSpec(mode=mode,
                                              loss=loss,
                                              train_op=train_op)
Example #19
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    # Predict.
    with ops.name_scope('head'):
      logits = _check_logits(logits, self._logits_dimension)
      predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
      if mode == model_fn.ModeKeys.PREDICT:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={'': export_output.RegressionOutput(value=logits)})

      # Eval.
      labels = _check_labels(_maybe_expand_dim(math_ops.to_float(labels)),
                             self._logits_dimension)
      unweighted_loss = losses.mean_squared_error(
          labels=labels, predictions=logits, reduction=losses.Reduction.NONE)
      weights = _weights(features, self._weight_column)
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        # Estimator already adds a metric for loss.
        eval_metric_ops = {
            metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(
                unweighted_loss, weights=weights)
        }
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=eval_metric_ops)

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(metric_keys.MetricKeys.LOSS, training_loss)
      summary.scalar(metric_keys.MetricKeys.LOSS_MEAN,
                     losses.compute_weighted_loss(
                         unweighted_loss,
                         weights=weights,
                         reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
Example #20
        def _model_fn(features, labels, mode, params):
            if not self._export_mode:
                # Always check batch size in params
                self.assertEqual(batch_size_dict[mode], params['batch_size'])
            else:
                self.assertNotIn('batch_size', params)

            # Check that the input feeds have the correct shape for train and
            # eval. Eval on CPU and predict are allowed to have dynamic shapes,
            # so only the fully known shape (which covers TPU training) is
            # validated here.
            if features['x'].shape.is_fully_defined():
                self.assertEqual(batch_size_dict[mode], features['x'].shape[0])

            predictions = layers.dense(
                features['x'],
                1,
                kernel_initializer=init_ops.ones_initializer())
            export_outputs = {
                'predictions': export_output.RegressionOutput(predictions)
            }

            if mode == _PREDICT:
                return _create_estimator_spec(
                    mode=mode,
                    predictions={'predictions': predictions},
                    export_outputs=export_outputs)

            loss = losses.mean_squared_error(labels, predictions)

            optimizer = tf.tpu.CrossShardOptimizer(
                training.GradientDescentOptimizer(learning_rate=0.5))
            train_op = optimizer.minimize(
                loss, global_step=training.get_global_step())

            eval_metrics = (
                lambda labels, predictions: {  # pylint: disable=g-long-lambda
                    'absolute_error':
                    metrics_lib.mean_absolute_error(labels, predictions)
                },
                [labels, predictions])
            return _create_estimator_spec(
                mode=mode,
                loss=loss,
                predictions={'predictions': predictions},
                export_outputs=export_outputs,
                train_op=train_op,
                eval_metrics=eval_metrics)
Example #21
def model_fn_global_step_incrementer(features, labels, mode, params):
    del params
    loss = None
    train_op = None
    predictions = dense_computation(features)
    if mode != _PREDICT:
        loss = losses.mean_squared_error(labels, predictions)
        optimizer = tf.tpu.CrossShardOptimizer(
            training.GradientDescentOptimizer(learning_rate=0.5))
        train_op = optimizer.minimize(loss, training.get_global_step())
    return tpu_estimator.TPUEstimatorSpec(
        mode,
        loss=loss,
        train_op=train_op,
        predictions={'predictions': predictions},
        export_outputs={
            'test': export_output.PredictOutput({'prediction': predictions})
        })
Example #22
        def my_net(a, b):
            c = variable_scope.get_variable('c', initializer=[1.0])
            self.assertTrue("ResourceVariable" in str(type(c)))

            lstm_cell = rnn_cell.LSTMCell(1, forget_bias=1.0)
            outputs, _ = rnn.dynamic_rnn(lstm_cell, a, dtype=np.float32)

            logits = outputs[-1] * c
            self.assertEqual(logits.device, "/device:IPU:0")

            res = array_ops.reshape(logits, [1, 8, 1])

            l = losses.mean_squared_error(res, b)

            optimizer = gradient_descent.GradientDescentOptimizer(0.1)
            train = optimizer.minimize(l)

            return [l, train]
Example #23
    def create_loss2(self, features, mode, logits, labels):
        """See `Head`."""
        del mode  # Unused for this head.
        labels = head_lib._check_dense_labels_match_logits_and_reshape(
            labels=labels, logits=logits, expected_labels_dimension=1)

        labels = math_ops.to_float(labels)
        unweighted_loss = losses.mean_squared_error(
            labels=labels,
            predictions=logits,
            reduction=losses.Reduction.NONE)
        training_loss = losses.compute_weighted_loss(
            unweighted_loss, reduction=self._loss_reduction)

        return LossSpec(
            training_loss=training_loss,
            unreduced_loss=unweighted_loss,
            processed_labels=labels)
Example #24
def mse(labels, predictions, weights=1.0):
    """ Mean Squared Error (MSE)

    Measures the average of the squares of the errors - the difference between an estimator and what is estimated.
    This is a risk function, corresponding to the expected value of the quadratic loss. Like variance, mean squared
    error has the disadvantage of heavily weighting outliers.


    Args:
        weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to
        `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension).
        predictions: a tensor with the estimated target values
        labels: ground truth, correct values

    Returns:
        ``Tensor``: a float ``Tensor``.

    """
    return mean_squared_error(labels, predictions, weights)
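A quick usage sketch of the wrapper above, with illustrative values; this assumes the wrapped `mean_squared_error` is TF's `tf.compat.v1.losses.mean_squared_error`:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

labels = tf.constant([[1.0], [2.0], [3.0]])
predictions = tf.constant([[1.5], [2.0], [2.0]])
loss = tf.losses.mean_squared_error(labels, predictions)  # the call `mse` forwards to

with tf.Session() as sess:
    # Mean of the squared errors: (0.25 + 0.0 + 1.0) / 3 = 0.4167.
    print(sess.run(loss))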
Example #25
  def model_fn(features, labels, mode, params):
    del params
    loss = None
    train_op = None
    predictions = dense_computation(features)
    export_outputs = None
    if mode != _PREDICT:
      loss = losses.mean_squared_error(labels, predictions)
      optimizer = tf.tpu.CrossShardOptimizer(
          training.GradientDescentOptimizer(learning_rate=0.5))
      train_op = optimizer.minimize(loss, training.get_global_step())
    else:
      if export_tpu_tensor:
        key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        export_outputs = {
            key: export_lib.PredictOutput({
                'prediction': predictions
            })
        }
      else:
        export_outputs = {}

      if export_cpu_tensor:

        def host_call(predictions):
          return string_ops.as_string(predictions, name='classes')

        classes = tf.tpu.outside_compilation(host_call, predictions)
        classification_output = export_lib.ClassificationOutput(
            classes=classes)
        export_outputs['classification'] = classification_output

    if tpu_estimator_spec:
      spec_type = tpu_estimator.TPUEstimatorSpec
    else:
      spec_type = model_fn_lib.EstimatorSpec

    return spec_type(
        mode,
        loss=loss,
        train_op=train_op,
        predictions={'predictions': predictions},
        export_outputs=export_outputs)
Example #26
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode  # Unused for this head.
   logits = ops.convert_to_tensor(logits)
   labels = _check_dense_labels_match_logits_and_reshape(
       labels=labels, logits=logits,
       expected_labels_dimension=self._logits_dimension)
   labels = math_ops.to_float(labels)
   unweighted_loss = losses.mean_squared_error(
       labels=labels, predictions=logits, reduction=losses.Reduction.NONE)
   weights = _get_weights_and_check_match_logits(
       features=features, weight_column=self._weight_column, logits=logits,
       allow_per_logit_weights=True)
   weighted_sum_loss = losses.compute_weighted_loss(
       unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
   # _weights() can return 1.
   example_weight_sum = math_ops.reduce_sum(
       weights * array_ops.ones_like(unweighted_loss))
   return LossSpec(
       weighted_sum_loss=weighted_sum_loss,
       example_weight_sum=example_weight_sum,
       processed_labels=labels)
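In this head, the SUM-reduced weighted loss is returned together with `example_weight_sum`, so a mean loss can be recovered downstream. A tiny numeric sketch of that bookkeeping, with values chosen purely for illustration:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

unweighted_loss = tf.constant([[1.0], [4.0]])  # per-example squared errors
weights = tf.constant([[1.0], [3.0]])
weighted_sum_loss = tf.losses.compute_weighted_loss(
    unweighted_loss, weights=weights, reduction=tf.losses.Reduction.SUM)
example_weight_sum = tf.reduce_sum(weights * tf.ones_like(unweighted_loss))

with tf.Session() as sess:
    # weighted_sum_loss = 1*1 + 4*3 = 13; example_weight_sum = 4,
    # so the implied mean loss is 13 / 4 = 3.25.
    print(sess.run([weighted_sum_loss, example_weight_sum]))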
Example #27
def _mean_squared_loss(labels,
                       logits,
                       weights=None,
                       reduction=core_losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
                       name=None):
    """Computes the mean squared loss for a list.

  Given labels of graded relevance l_i and logits s_i, we compute the squared
  error at each position i and aggregate the per-position losses.

  Args:
    labels: A `Tensor` of the same shape as `logits` representing graded
      relevance.
    logits: A `Tensor` with shape [batch_size, list_size]. Each value is the
      ranking score of the corresponding item.
    weights: A scalar, a `Tensor` with shape [batch_size, 1] for list-wise
      weights, or a `Tensor` with shape [batch_size, list_size] for item-wise
      weights.
    reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
      reduce training loss over batch.
    name: A string used as the name for this loss.

  Returns:
    An op for the mean squared error as a loss.
  """
    with ops.name_scope(name, 'mean_squared_loss', (labels, logits, weights)):
        is_label_valid = array_ops.reshape(utils.is_label_valid(labels), [-1])
        weights = 1.0 if weights is None else ops.convert_to_tensor(weights)
        weights = array_ops.ones_like(labels) * weights
        label_vector, logit_vector, weight_vector = [
            array_ops.boolean_mask(array_ops.reshape(x, [-1]), is_label_valid)
            for x in [labels, logits, weights]
        ]
        return core_losses.mean_squared_error(label_vector,
                                              logit_vector,
                                              weights=weight_vector,
                                              reduction=reduction)
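The ranking loss above masks out padded list positions before computing the MSE. A self-contained sketch of that masking step with core ops only; `utils.is_label_valid` is stood in for by `labels >= 0` (the usual padding convention), which is an assumption here:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

labels = tf.constant([[1.0, 0.0, -1.0]])  # -1.0 marks a padded position
logits = tf.constant([[0.8, 0.2, 0.5]])
is_label_valid = tf.reshape(labels >= 0.0, [-1])
label_vector = tf.boolean_mask(tf.reshape(labels, [-1]), is_label_valid)
logit_vector = tf.boolean_mask(tf.reshape(logits, [-1]), is_label_valid)
loss = tf.losses.mean_squared_error(label_vector, logit_vector)

with tf.Session() as sess:
    # Only the two valid positions contribute: (0.04 + 0.04) / 2 = 0.04.
    print(sess.run(loss))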
Example #28
        def my_model_fn(features, labels, mode):
            self.assertEqual(model_fn_lib.ModeKeys.TRAIN, mode)

            with variable_scope.variable_scope("vs", use_resource=True):
                with ipu.scopes.ipu_shard(0):
                    out_0 = layers.Dense(units=1)(features)

                with ipu.scopes.ipu_shard(1):
                    predictions = layers.Dense(units=1)(out_0)

            loss = losses.mean_squared_error(labels=labels,
                                             predictions=predictions)
            optimizer = gradient_descent.GradientDescentOptimizer(0.1)
            sharded_optimizer_obj = sharded_optimizer.ShardedOptimizer(
                optimizer)
            cross_replica_optimizer_obj = (
                cross_replica_optimizer.CrossReplicaOptimizer(
                    sharded_optimizer_obj))
            train_op = cross_replica_optimizer_obj.minimize(loss)

            return model_fn_lib.EstimatorSpec(mode=mode,
                                              loss=loss,
                                              train_op=train_op)
Example #29
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode  # Unused for this head.
   logits = ops.convert_to_tensor(logits)
   labels = _check_dense_labels_match_logits_and_reshape(
       labels=labels, logits=logits,
       expected_labels_dimension=self._logits_dimension)
   labels = math_ops.to_float(labels)
   unweighted_loss = losses.mean_squared_error(
       labels=labels, predictions=logits, reduction=losses.Reduction.NONE)
   weights = _weights(features, self._weight_column)
   if self._weight_column is not None:
     weights = _check_weights_match_logits_and_reshape(
         weights=weights, logits=logits)
   weighted_sum_loss = losses.compute_weighted_loss(
       unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
   # _weights() can return 1.
   example_weight_sum = math_ops.reduce_sum(
       weights * array_ops.ones_like(unweighted_loss))
   return LossSpec(
       weighted_sum_loss=weighted_sum_loss,
       example_weight_sum=example_weight_sum,
       processed_labels=labels)
Example #30
def visual_feature_regularizer(model,
                               weights=1.0,
                               scope=None,
                               loss_collection=ops.GraphKeys.LOSSES,
                               reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
                               add_summaries=False):
    """Returns a regularization penalty on a model's visual features.

    Adapted from the InfoGAN mutual-information penalty
    (https://arxiv.org/abs/1606.03657): each continuous visual feature is
    pushed toward a mean of 1 or -1 with a mean squared error, and each
    discrete visual feature is scored with a softmax cross-entropy against
    its one-hot category.

    Args:
      model: A model exposing `visual_features` and `feature_list` attributes.
      weights: Optional `Tensor` whose rank is either 0, or broadcastable to
        the visual feature tensors.
      scope: The scope for the operations performed in computing the loss.
      loss_collection: collection to which this loss will be added.
      reduction: A `tf.losses.Reduction` to apply to loss.
      add_summaries: Whether or not to add summaries for the loss.

    Returns:
      A scalar Tensor combining the rotation, width, and category losses.
    """
    # Push each continuous visual feature toward a mean of -1 (left) or
    # 1 (right) with an MSE penalty.

    visual_features = model.visual_features
    feature_list = model.feature_list
    loss = {}
    for key in [
            key for key in visual_features.keys()
            if key in feature_list['continuous'].keys()
    ]:
        label = tf.ones_like(visual_features[key]['left'])
        loss[key] = losses.mean_squared_error(
            -label,
            visual_features[key]['left'],
            weights,
            scope,
            loss_collection=loss_collection,
            reduction=reduction) + losses.mean_squared_error(
                label,
                visual_features[key]['right'],
                weights,
                scope,
                loss_collection=loss_collection,
                reduction=reduction)

    for key in [
            key for key in visual_features.keys()
            if key in feature_list['discrete'].keys()
    ]:
        loss[key] = 0
        category_num = len(feature_list['discrete'][key])
        for attribute in feature_list['discrete'][key]:
            onehot_labels = tf.one_hot(
                [attribute] * visual_features[key][attribute].shape[1],
                category_num)
            loss[key] += losses.softmax_cross_entropy(
                onehot_labels,
                visual_features[key][attribute],
                weights,
                0,
                scope,
                loss_collection=loss_collection,
                reduction=reduction)

    if add_summaries:
        for key in loss.keys():
            summary.scalar('loss_' + key, loss[key])

    return loss['rotation'] + loss['width'] + 2 * loss['category']