Example #1
    def model_fn(features, labels, mode, config):
        """Model function for custom estimator."""
        del config
        predictions = features['prediction']

        if output_prediction_key is not None:
            predictions_dict = {
                output_prediction_key: predictions,
            }
        else:
            # For simulating Estimators which don't return a predictions dict in
            # EVAL mode.
            predictions_dict = {}

        if mode == tf_estimator.ModeKeys.PREDICT:
            return tf_estimator.EstimatorSpec(
                mode=mode,
                predictions=predictions_dict,
                export_outputs={
                    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    tf_estimator.export.RegressionOutput(predictions)
                })

        loss = tf.compat.v1.losses.mean_squared_error(predictions, labels)
        train_op = tf.compat.v1.assign_add(
            tf.compat.v1.train.get_global_step(), 1)
        eval_metric_ops = {
            metric_keys.MetricKeys.LOSS_MEAN: tf.compat.v1.metrics.mean(loss),
        }

        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          train_op=train_op,
                                          predictions=predictions_dict,
                                          eval_metric_ops=eval_metric_ops)
Example #2
    def model_fn(features, labels, mode, config):
        """Model function for custom estimator."""
        del config
        predictions = features['prediction']
        predictions_dict = {
            prediction_keys.PredictionKeys.PREDICTIONS: predictions,
        }

        if mode == tf_estimator.ModeKeys.PREDICT:
            return tf_estimator.EstimatorSpec(
                mode=mode,
                predictions=predictions_dict,
                export_outputs={
                    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    tf_estimator.export.RegressionOutput(predictions)
                })

        loss = tf.compat.v1.losses.mean_squared_error(predictions, labels)
        train_op = tf.compat.v1.assign_add(
            tf.compat.v1.train.get_global_step(), 1)

        eval_metric_ops = {}
        if include_metrics:
            eval_metric_ops[metric_keys.MetricKeys.LOSS_MEAN] = (
                tf.compat.v1.metrics.mean(loss))

        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          train_op=train_op,
                                          predictions=predictions_dict,
                                          eval_metric_ops=eval_metric_ops)
Example #3
def _model_fn(features, labels, mode, config):
    """Model function for custom estimator."""

    del config  # Unused.

    predictions = tf.cast(features['input_index'], tf.float32)
    if mode == tf_estimator.ModeKeys.PREDICT:
        return tf_estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            export_outputs={
                tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                tf_estimator.export.RegressionOutput(predictions)
            })

    loss = tf.compat.v1.losses.mean_squared_error(features['example_count'],
                                                  labels)
    train_op = tf.compat.v1.assign_add(tf.compat.v1.train.get_global_step(), 1)
    eval_metric_ops = {
        metric_keys.MetricKeys.LOSS_MEAN: tf.compat.v1.metrics.mean(loss),
    }

    return tf_estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      train_op=train_op,
                                      predictions=predictions,
                                      eval_metric_ops=eval_metric_ops)
Example #4
def model_fn(features, labels, mode, config):
    """Model function for custom estimator."""
    del labels
    del config
    classes = tf.sparse.to_dense(features['classes'], default_value='?')
    scores = tf.sparse.to_dense(features['scores'], default_value=0.0)

    predictions = {
        prediction_keys.PredictionKeys.LOGITS: scores,
        prediction_keys.PredictionKeys.PROBABILITIES: scores,
        prediction_keys.PredictionKeys.CLASSES: classes,
    }

    if mode == tf_estimator.ModeKeys.PREDICT:
        return tf_estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            export_outputs={
                tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                tf_estimator.export.ClassificationOutput(scores=scores,
                                                         classes=classes),
            })

    # Note that this is always going to be 0.
    loss = tf.compat.v1.losses.mean_squared_error(scores, tf.ones_like(scores))
    train_op = tf.compat.v1.assign_add(tf.compat.v1.train.get_global_step(), 1)
    eval_metric_ops = {
        metric_keys.MetricKeys.LOSS_MEAN: tf.compat.v1.metrics.mean(loss),
    }

    return tf_estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      train_op=train_op,
                                      predictions=predictions,
                                      eval_metric_ops=eval_metric_ops)
Example #5
        def model_fn(features, labels, mode, params):
            """Estimator model_fn produced by ModelBuilder.

      Args:
        features: passed to config_model_prediction
        labels: passed to config_model_[training|evaluation]
        mode: from tf.estimator.ModeKeys
        params: dict of options passed to config_* methods

      Returns:
        a Python function
      """
            # initialize and partially configure the model
            m = Model()
            m.context = self.build_context(params=params)

            self.config_model_prediction(m, features, params=params)
            if mode == tf_estimator.ModeKeys.PREDICT:
                return tf_estimator.EstimatorSpec(mode=mode,
                                                  predictions=m.predictions)

            self.config_model_training(m, labels, params)
            if mode == tf_estimator.ModeKeys.TRAIN:
                return tf_estimator.EstimatorSpec(mode=mode,
                                                  train_op=m.train_op,
                                                  loss=m.loss)

            self.config_model_evaluation(m, labels, params)
            if mode == tf_estimator.ModeKeys.EVAL:
                return tf_estimator.EstimatorSpec(
                    mode=mode, loss=m.loss, eval_metric_ops=m.evaluations)

            raise ValueError('illegal mode %r' % mode)
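The builder class around this model_fn is not shown. A hypothetical minimal subclass, using only the hook names visible in the snippet (`build_context`, `config_model_prediction`, `config_model_training`, `config_model_evaluation`) and a bare `Model` attribute bag, might look like:

    import tensorflow as tf

    class LinearModelBuilder(ModelBuilder):  # `ModelBuilder` itself not shown.

        def build_context(self, params):
            return {}  # whatever shared state the config_* hooks need

        def config_model_prediction(self, m, features, params):
            m.predictions = tf.keras.layers.Dense(1)(features['x'])

        def config_model_training(self, m, labels, params):
            m.loss = tf.reduce_mean(
                tf.math.squared_difference(labels, m.predictions))
            optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.01)
            m.train_op = optimizer.minimize(
                m.loss, global_step=tf.compat.v1.train.get_global_step())

        def config_model_evaluation(self, m, labels, params):
            m.evaluations = {
                'mae': tf.compat.v1.metrics.mean_absolute_error(
                    labels, m.predictions),
            }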
Example #6
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              regularization_losses=None):
        """See `_AbstractRankingHead`."""
        logits = tf.convert_to_tensor(value=logits)
        # Predict.
        with tf.compat.v1.name_scope(self._name, 'head'):
            if mode == tf_estimator.ModeKeys.PREDICT:
                return tf_estimator.EstimatorSpec(
                    mode=mode,
                    predictions=logits,
                    export_outputs={
                        _DEFAULT_SERVING_KEY:
                        tf_estimator.export.RegressionOutput(logits),
                        _REGRESS_SERVING_KEY:
                        tf_estimator.export.RegressionOutput(logits),
                        _PREDICT_SERVING_KEY:
                        tf_estimator.export.PredictOutput(logits),
                    })

            training_loss = self.create_loss(features=features,
                                             mode=mode,
                                             logits=logits,
                                             labels=labels)
            if regularization_losses:
                regularization_loss = tf.add_n(regularization_losses)
                regularized_training_loss = tf.add(training_loss,
                                                   regularization_loss)
            else:
                regularized_training_loss = training_loss

            # Eval.
            if mode == tf_estimator.ModeKeys.EVAL:
                eval_metric_ops = {
                    name: metric_fn(labels=labels,
                                    predictions=logits,
                                    features=features)
                    for name, metric_fn in six.iteritems(self._eval_metric_fns)
                }
                eval_metric_ops.update(
                    self._labels_and_logits_metrics(labels, logits))
                return tf_estimator.EstimatorSpec(
                    mode=mode,
                    predictions=logits,
                    loss=regularized_training_loss,
                    eval_metric_ops=eval_metric_ops)

            # Train.
            if mode == tf_estimator.ModeKeys.TRAIN:
                return tf_estimator.EstimatorSpec(
                    mode=mode,
                    loss=regularized_training_loss,
                    train_op=_get_train_op(regularized_training_loss,
                                           self._train_op_fn, self._optimizer),
                    predictions=logits)
            raise ValueError('mode={} unrecognized'.format(mode))
Example #7
def nn_model_fn(features, labels, mode):
    """Define NN architecture using tf.keras.layers."""
    input_layer = tf.reshape(features['x'], [-1, maxlen])
    y = tf.keras.layers.Embedding(max_features, 16).apply(input_layer)
    y = tf.keras.layers.GlobalAveragePooling1D().apply(y)
    y = tf.keras.layers.Dense(16, activation='relu').apply(y)
    logits = tf.keras.layers.Dense(2).apply(y)

    # Calculate loss as a vector (to support microbatches in DP-SGD).
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=logits)
    # Define mean of loss across minibatch (for reporting through tf.Estimator).
    scalar_loss = tf.reduce_mean(vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf_estimator.ModeKeys.TRAIN:
        if FLAGS.dpsgd:
            # Use DP version of GradientDescentOptimizer. Other optimizers are
            # available in dp_optimizer. Most optimizers inheriting from
            # tf.train.Optimizer should be wrappable in differentially private
            # counterparts by calling dp_optimizer.optimizer_from_args().
            optimizer = dp_optimizer.DPAdamGaussianOptimizer(
                l2_norm_clip=FLAGS.l2_norm_clip,
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=microbatches,
                learning_rate=FLAGS.learning_rate)
            opt_loss = vector_loss
        else:
            optimizer = tf.compat.v1.train.AdamOptimizer(
                learning_rate=FLAGS.learning_rate)
            opt_loss = scalar_loss

        global_step = tf.compat.v1.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
        # In the following, we pass the mean of the loss (scalar_loss) rather than
        # the vector_loss because tf.estimator requires a scalar loss. This is only
        # used for evaluation and debugging by tf.estimator. The actual loss being
        # minimized is opt_loss defined above and passed to optimizer.minimize().
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    if mode == tf_estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.compat.v1.metrics.accuracy(labels=labels,
                                          predictions=tf.argmax(input=logits,
                                                                axis=1))
        }
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
    return None
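The DP-SGD tutorials this snippet is drawn from typically follow training with a privacy accounting step. A sketch under stated assumptions: the helper below exists in tensorflow_privacy (its module path has moved between releases), and the dataset size and flag values are hypothetical.

    from tensorflow_privacy.privacy.analysis.compute_dp_sgd_privacy import (
        compute_dp_sgd_privacy)

    # Hypothetical values: 60k training examples and the flags used above.
    eps, _ = compute_dp_sgd_privacy(n=60000,
                                    batch_size=256,
                                    noise_multiplier=1.1,
                                    epochs=15,
                                    delta=1e-5)
    print('DP-SGD trained model satisfies ({:.2f}, 1e-5)-DP'.format(eps))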
Example #8
def lr_model_fn(features, labels, mode, nclasses, dim):
    """Model function for logistic regression."""
    input_layer = tf.reshape(features['x'], tuple([-1]) + dim)
    logits = tf.keras.layers.Dense(
        units=nclasses,
        kernel_regularizer=tf.keras.regularizers.L2(l2=FLAGS.regularizer),
        bias_regularizer=tf.keras.regularizers.L2(
            l2=FLAGS.regularizer)).apply(input_layer)

    # Calculate loss as a vector (to support microbatches in DP-SGD).
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits) + tf.losses.get_regularization_loss()
    # Define mean of loss across minibatch (for reporting through tf.Estimator).
    scalar_loss = tf.reduce_mean(vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf_estimator.ModeKeys.TRAIN:
        if FLAGS.dpsgd:
            # The loss function is L-Lipschitz with L = sqrt(2*(||x||^2 + 1)) where
            # ||x|| is the norm of the data.
            # We don't use microbatches (thus speeding up computation), since no
            # clipping is necessary due to data normalization.
            optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
                l2_norm_clip=math.sqrt(2 * (FLAGS.data_l2_norm**2 + 1)),
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=1,
                learning_rate=FLAGS.learning_rate)
            opt_loss = vector_loss
        else:
            optimizer = GradientDescentOptimizer(
                learning_rate=FLAGS.learning_rate)
            opt_loss = scalar_loss
        global_step = tf.compat.v1.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
        # In the following, we pass the mean of the loss (scalar_loss) rather than
        # the vector_loss because tf.estimator requires a scalar loss. This is only
        # used for evaluation and debugging by tf.estimator. The actual loss being
        # minimized is opt_loss defined above and passed to optimizer.minimize().
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf_estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.metrics.accuracy(labels=labels,
                                predictions=tf.argmax(input=logits, axis=1))
        }
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
Example #9
    def model_fn(features, labels, mode, config):
        """Model function for custom estimator."""
        del config
        predictions = features['prediction']
        predictions_dict = {
            prediction_keys.PredictionKeys.PREDICTIONS: predictions,
        }

        if mode == tf_estimator.ModeKeys.PREDICT:
            return tf_estimator.EstimatorSpec(
                mode=mode,
                predictions=predictions_dict,
                export_outputs={
                    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    tf_estimator.export.RegressionOutput(predictions)
                })

        loss = tf.compat.v1.losses.mean_squared_error(predictions,
                                                      labels['actual_label'])
        train_op = tf.compat.v1.assign_add(
            tf.compat.v1.train.get_global_step(), 1)

        eval_metric_ops = {}
        if mode == tf_estimator.ModeKeys.EVAL:
            eval_metric_ops = {
                metric_keys.MetricKeys.LOSS_MEAN:
                tf.compat.v1.metrics.mean(loss),
                'control_dependency_on_fixed_float':
                control_dependency_metric(1.0, features['fixed_float']),
                # Introduce a direct dependency on the values Tensor. If we
                # introduced another intervening op like sparse_tensor_to_dense,
                # then regardless of whether TFMA correctly wraps SparseTensors
                # we would not encounter the TF bug.
                'control_dependency_on_var_float':
                control_dependency_metric(10.0, features['var_float'].values),
                'control_dependency_on_actual_label':
                control_dependency_metric(100.0, labels['actual_label']),
                'control_dependency_on_var_int_label':
                control_dependency_metric(1000.0, labels['var_int'].values),
                # Note that TFMA does *not* wrap predictions, so in most cases
                # if there's a control dependency on predictions they will be
                # recomputed.
                'control_dependency_on_prediction':
                control_dependency_metric(10000.0, predictions),
            }

        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          train_op=train_op,
                                          predictions=predictions_dict,
                                          eval_metric_ops=eval_metric_ops)
Example #10
def cnn_model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """Model function for a CNN."""

    # Define CNN architecture.
    logits = common.get_cnn_model(features)

    # Calculate loss as a vector (to support microbatches in DP-SGD).
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=logits)
    # Define mean of loss across minibatch (for reporting through tf.Estimator).
    scalar_loss = tf.reduce_mean(input_tensor=vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf_estimator.ModeKeys.TRAIN:
        if FLAGS.dpsgd:
            # Use DP version of GradientDescentOptimizer. Other optimizers are
            # available in dp_optimizer. Most optimizers inheriting from
            # tf.compat.v1.train.Optimizer should be wrappable in differentially
            # private counterparts by calling dp_optimizer.optimizer_from_args().
            optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
                l2_norm_clip=FLAGS.l2_norm_clip,
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=FLAGS.microbatches,
                learning_rate=FLAGS.learning_rate)
            opt_loss = vector_loss
        else:
            optimizer = tf.compat.v1.train.GradientDescentOptimizer(
                learning_rate=FLAGS.learning_rate)
            opt_loss = scalar_loss

        global_step = tf.compat.v1.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)

        # In the following, we pass the mean of the loss (scalar_loss) rather than
        # the vector_loss because tf.estimator requires a scalar loss. This is only
        # used for evaluation and debugging by tf.estimator. The actual loss being
        # minimized is opt_loss defined above and passed to optimizer.minimize().
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf_estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.metrics.accuracy(labels=labels,
                                predictions=tf.argmax(input=logits, axis=1))
        }
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
Example #11
def rnn_model_fn(features, labels, mode):  # pylint: disable=unused-argument
    """Model function for a RNN."""

    # Define RNN architecture using tf.keras.layers.
    x = features['x']
    x = tf.reshape(x, [-1, SEQ_LEN])
    input_layer = x[:, :-1]
    input_one_hot = tf.one_hot(input_layer, 256)
    lstm = tf.keras.layers.LSTM(256,
                                return_sequences=True).apply(input_one_hot)
    logits = tf.keras.layers.Dense(256).apply(lstm)

    # Calculate loss as a vector (to support microbatches in DP-SGD).
    vector_loss = tf.nn.softmax_cross_entropy_with_logits(labels=tf.cast(
        tf.one_hot(x[:, 1:], 256), dtype=tf.float32),
                                                          logits=logits)
    # Define mean of loss across minibatch (for reporting through tf.Estimator).
    scalar_loss = tf.reduce_mean(vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf_estimator.ModeKeys.TRAIN:
        if FLAGS.dpsgd:
            # Use the DP version of AdamOptimizer; unroll_microbatches=True
            # processes microbatches in a Python loop rather than a
            # tf.while_loop, working around a known issue in this setting.
            optimizer = dp_optimizer.DPAdamGaussianOptimizer(
                l2_norm_clip=FLAGS.l2_norm_clip,
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=FLAGS.microbatches,
                learning_rate=FLAGS.learning_rate,
                unroll_microbatches=True)
            opt_loss = vector_loss
        else:
            optimizer = tf.compat.v1.train.AdamOptimizer(
                learning_rate=FLAGS.learning_rate)
            opt_loss = scalar_loss
        global_step = tf.compat.v1.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf_estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.metrics.accuracy(labels=tf.cast(x[:, 1:], dtype=tf.int32),
                                predictions=tf.argmax(input=logits, axis=2))
        }
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
Example #12
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              regularization_losses=None):
        """See `_AbstractRankingHead`."""
        with tf.compat.v1.name_scope(self.name, 'multi_head'):
            self._check_logits_and_labels(logits, labels)
            # Get all estimator spec.
            all_estimator_spec = []
            for head in self._heads:
                all_estimator_spec.append(
                    head.create_estimator_spec(
                        features=features,
                        mode=mode,
                        logits=logits[head.name],
                        labels=labels[head.name] if labels else None))
            # Predict.
            if mode == tf_estimator.ModeKeys.PREDICT:
                export_outputs = self._merge_predict_export_outputs(
                    all_estimator_spec)
                return tf_estimator.EstimatorSpec(
                    mode=mode,
                    predictions=logits,
                    export_outputs=export_outputs)

            # Compute the merged loss and eval metrics.
            loss = self._merge_loss(labels, logits, features, mode,
                                    regularization_losses)
            eval_metric_ops = self._merge_metrics(all_estimator_spec)

            # Eval.
            if mode == tf_estimator.ModeKeys.EVAL:
                return tf_estimator.EstimatorSpec(
                    mode=mode,
                    predictions=logits,
                    loss=loss,
                    eval_metric_ops=eval_metric_ops)
            # Train.
            if mode == tf_estimator.ModeKeys.TRAIN:
                return tf_estimator.EstimatorSpec(
                    mode=mode,
                    loss=loss,
                    train_op=_get_train_op(loss, self._train_op_fn,
                                           self._optimizer),
                    predictions=logits,
                    eval_metric_ops=eval_metric_ops)
            raise ValueError('mode={} unrecognized'.format(mode))
Example #13
def _get_estimator_spec_with_metrics(logits: tf.Tensor,
                                     softmax_logits: tf.Tensor,
                                     duplicate_mask: tf.Tensor,
                                     num_training_neg: int,
                                     match_mlperf: bool = False,
                                     use_tpu_spec: bool = False):
    """Returns a EstimatorSpec that includes the metrics."""
    cross_entropy, \
    metric_fn, \
    in_top_k, \
    ndcg, \
    metric_weights = compute_eval_loss_and_metrics_helper(
        logits,
        softmax_logits,
        duplicate_mask,
        num_training_neg,
        match_mlperf)

    if use_tpu_spec:
        return tf_estimator.tpu.TPUEstimatorSpec(
            mode=tf_estimator.ModeKeys.EVAL,
            loss=cross_entropy,
            eval_metrics=(metric_fn, [in_top_k, ndcg, metric_weights]))

    return tf_estimator.EstimatorSpec(mode=tf_estimator.ModeKeys.EVAL,
                                      loss=cross_entropy,
                                      eval_metric_ops=metric_fn(
                                          in_top_k, ndcg, metric_weights))
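The two branches differ only in how the metrics are plumbed: `TPUEstimatorSpec` takes `eval_metrics` as a `(metric_fn, tensors)` pair that the framework calls on the host, while a plain `EstimatorSpec` wants the finished `eval_metric_ops` dict, which is why the second branch calls `metric_fn(...)` itself. A schematic sketch with a hypothetical metric_fn:

    import tensorflow as tf

    def metric_fn(in_top_k, ndcg, weights):
        # Each entry is a (value, update_op) pair, as eval_metric_ops requires.
        return {
            'hit_rate': tf.compat.v1.metrics.mean(in_top_k, weights=weights),
            'ndcg': tf.compat.v1.metrics.mean(ndcg, weights=weights),
        }

    # TPU path: hand over the function and its argument tensors.
    #     eval_metrics=(metric_fn, [in_top_k, ndcg, metric_weights])
    # CPU/GPU path: call it at graph-construction time and pass the dict.
    #     eval_metric_ops=metric_fn(in_top_k, ndcg, metric_weights)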
Example #14
def cnn_model_fn(features, labels, mode):
    """Model function for a CNN."""

    # Define CNN architecture using tf.keras.layers.
    input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
    y = tf.keras.layers.Conv2D(16,
                               8,
                               strides=2,
                               padding='same',
                               activation='relu').apply(input_layer)
    y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
    y = tf.keras.layers.Conv2D(32,
                               4,
                               strides=2,
                               padding='valid',
                               activation='relu').apply(y)
    y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
    y = tf.keras.layers.Flatten().apply(y)
    y = tf.keras.layers.Dense(32, activation='relu').apply(y)
    logits = tf.keras.layers.Dense(10).apply(y)

    # Calculate loss as a vector and as its average across minibatch.
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=logits)
    scalar_loss = tf.reduce_mean(vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf_estimator.ModeKeys.TRAIN:
        optimizer = tf.compat.v1.train.GradientDescentOptimizer(
            FLAGS.learning_rate)
        opt_loss = scalar_loss
        global_step = tf.compat.v1.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf_estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.metrics.accuracy(labels=labels,
                                predictions=tf.argmax(input=logits, axis=1))
        }
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
Example #15
  def model_fn(features, labels, mode, config):
    """Model function for custom estimator."""
    del config
    dense_values = tf.sparse.to_dense(
        features['sparse_values'], validate_indices=False)
    a = tf.Variable(1.0, dtype=tf.float32, name='a')
    b = tf.Variable(2.0, dtype=tf.float32, name='b')
    c = tf.Variable(3.0, dtype=tf.float32, name='c')
    d = tf.Variable(4.0, dtype=tf.float32, name='d')
    e = tf.Variable(5.0, dtype=tf.float32, name='e')
    f = tf.Variable(6.0, dtype=tf.float32, name='f')
    predictions = (
        a * tf.reduce_sum(input_tensor=features['embedding'][:, 0, :], axis=1) +
        b * tf.reduce_sum(input_tensor=features['embedding'][:, 1, :], axis=1) +
        c * tf.reduce_sum(input_tensor=features['embedding'][:, 2, :], axis=1) +
        d * tf.reduce_sum(input_tensor=dense_values[:, 0, :], axis=1) +
        e * tf.reduce_sum(input_tensor=dense_values[:, 1, :], axis=1) +
        f * tf.reduce_sum(input_tensor=dense_values[:, 2, :], axis=1))

    if mode == tf_estimator.ModeKeys.PREDICT:
      return tf_estimator.EstimatorSpec(
          mode=mode,
          predictions={'score': predictions},
          export_outputs={
              'score': tf_estimator.export.RegressionOutput(predictions)
          })

    loss = tf.compat.v1.losses.mean_squared_error(
        labels, tf.expand_dims(predictions, axis=-1))

    optimizer = tf.compat.v1.train.GradientDescentOptimizer(
        learning_rate=0.0001)
    train_op = optimizer.minimize(
        loss=loss, global_step=tf.compat.v1.train.get_global_step())

    return tf_estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops={
            'mean_squared_error':
                tf.compat.v1.metrics.mean_squared_error(
                    labels, tf.expand_dims(predictions, axis=-1)),
            'mean_prediction':
                tf.compat.v1.metrics.mean(predictions),
        },
        predictions=predictions)
Example #16
    def _model_fn(self, features, labels, mode, params=None):
        network = NetworkFactory.get(self._model_name)
        # ----------------------------------------
        # (Elided in the original source: `network` would be applied to
        # `features` here to produce predictions, loss, and a train op.)
        # ----------------------------------------

        return estimator.EstimatorSpec(
            mode=mode,
            # Remaining fields (predictions, loss, train_op, eval_metric_ops)
            # are elided in the original source.
        )
Example #17
def small_cnn_fn(features, labels, mode):
    """Setup a small CNN for image classification."""
    input_layer = tf.reshape(features['x'], [-1, 32, 32, 3])
    y = input_layer
    for _ in range(3):
        # Chain each conv block on the previous output (the original applied
        # every block to input_layer, discarding the earlier blocks).
        y = tf.keras.layers.Conv2D(32, (3, 3), activation='relu')(y)
        y = tf.keras.layers.MaxPool2D()(y)

    y = tf.keras.layers.Flatten()(y)
    y = tf.keras.layers.Dense(64, activation='relu')(y)
    logits = tf.keras.layers.Dense(10)(y)

    if mode != tf_estimator.ModeKeys.PREDICT:
        vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits)
        scalar_loss = tf.reduce_mean(input_tensor=vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf_estimator.ModeKeys.TRAIN:
        optimizer = tf.compat.v1.train.MomentumOptimizer(
            learning_rate=FLAGS.learning_rate, momentum=0.9)
        global_step = tf.compat.v1.train.get_global_step()
        train_op = optimizer.minimize(loss=scalar_loss,
                                      global_step=global_step)
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf_estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.metrics.accuracy(labels=labels,
                                predictions=tf.argmax(input=logits, axis=1))
        }
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)

    # Output the prediction probability (for PREDICT mode).
    elif mode == tf_estimator.ModeKeys.PREDICT:
        predictions = tf.nn.softmax(logits)
        return tf_estimator.EstimatorSpec(mode=mode, predictions=predictions)
Example #18
    def model_fn(features, mode, config):
        """Model function for custom estimator."""
        del config
        predictions = features['prediction']

        if output_prediction_key is not None:
            predictions_dict = {
                output_prediction_key: predictions,
            }
        else:
            # For simulating Estimators which don't return a predictions dict in
            # EVAL mode.
            predictions_dict = {}

        if mode == tf_estimator.ModeKeys.PREDICT:
            return tf_estimator.EstimatorSpec(
                mode=mode,
                predictions=predictions_dict,
                export_outputs={
                    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    tf_estimator.export.RegressionOutput(predictions)
                })

        # We create a nonsensical loss that is easy to compute:
        # loss = mean(predictions^2), and export it as the average loss to
        # test that the metrics are computed correctly.
        loss = tf.compat.v1.losses.mean_squared_error(
            predictions, tf.zeros_like(predictions))
        train_op = tf.compat.v1.assign_add(
            tf.compat.v1.train.get_global_step(), 1)
        eval_metric_ops = {
            metric_keys.MetricKeys.LOSS_MEAN: tf.compat.v1.metrics.mean(loss),
        }

        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          train_op=train_op,
                                          predictions=predictions_dict,
                                          eval_metric_ops=eval_metric_ops)
Example #19
def model_fn(features, labels, mode, config):
    """Model function for custom estimator."""
    del labels
    del config
    classes = features['classes']
    scores = features['scores']

    with tf.control_dependencies(
        [tf.assert_less(tf.shape(classes)[0], tf.constant(2))]):
        scores = tf.identity(scores)

    predictions = {
        prediction_keys.PredictionKeys.LOGITS: scores,
        prediction_keys.PredictionKeys.PROBABILITIES: scores,
        prediction_keys.PredictionKeys.PREDICTIONS: scores,
        prediction_keys.PredictionKeys.CLASSES: classes,
    }

    if mode == tf_estimator.ModeKeys.PREDICT:
        return tf_estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            export_outputs={
                tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                tf_estimator.export.ClassificationOutput(scores=scores,
                                                         classes=classes),
            })

    loss = tf.constant(0.0)
    train_op = tf.compat.v1.assign_add(tf.compat.v1.train.get_global_step(), 1)
    eval_metric_ops = {
        metric_keys.MetricKeys.LOSS_MEAN: tf.compat.v1.metrics.mean(loss),
    }

    return tf_estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      train_op=train_op,
                                      predictions=predictions,
                                      eval_metric_ops=eval_metric_ops)
Example #20
    def model_fn(features, labels, mode, config):
        """Model function for custom estimator."""
        del config
        m = tf.Variable(0.0, dtype=tf.float32, name='m')
        c = tf.Variable(0.0, dtype=tf.float32, name='c')
        predictions = m * features['age'] + c

        if mode == tf_estimator.ModeKeys.PREDICT:
            return tf_estimator.EstimatorSpec(
                mode=mode,
                predictions={'score': predictions},
                export_outputs={
                    'score': tf_estimator.export.RegressionOutput(predictions)
                })

        loss = tf.compat.v1.losses.mean_squared_error(labels, predictions)
        eval_metric_ops = {
            'mean_absolute_error':
            tf.compat.v1.metrics.mean_absolute_error(
                tf.cast(labels, tf.float64), tf.cast(predictions, tf.float64)),
            'mean_prediction':
            tf.compat.v1.metrics.mean(predictions),
            'mean_label':
            tf.compat.v1.metrics.mean(labels),
        }

        optimizer = tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss, global_step=tf.compat.v1.train.get_global_step())

        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          train_op=train_op,
                                          eval_metric_ops=eval_metric_ops,
                                          predictions=predictions)
Example #21
        def linear_model_fn(features, labels, mode):
            preds = tf.keras.layers.Dense(1, activation='linear',
                                          name='dense')(features['x'])

            vector_loss = tf.math.squared_difference(labels, preds)
            scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
            dp_sum_query = gaussian_query.GaussianSumQuery(1.0, 0.0)
            optimizer = dp_optimizer.DPGradientDescentOptimizer(
                dp_sum_query, num_microbatches=1, learning_rate=1.0)
            global_step = tf.compat.v1.train.get_global_step()
            train_op = optimizer.minimize(loss=vector_loss,
                                          global_step=global_step)
            return tf_estimator.EstimatorSpec(mode=mode,
                                              loss=scalar_loss,
                                              train_op=train_op)
Example #22
def cnn_model_fn(features, labels, mode):
    """ Input function for CNN """
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    # 1st cnn layer
    conv1 = tf.layers.conv2d(inputs=input_layer, filters=32, kernel_size=[5, 5], padding="same", activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # 2nd cnn layer
    conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[5, 5], padding="same", activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # flatten and dense layer
    pool_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)

    # logit layer
    logits = tf.layers.dense(inputs=dropout, units=10)

    # calculate outputs and probabilities
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probablities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    # return the prediction mode results
    if mode == estimator.ModeKeys.PREDICT:
        return estimator.EstimatorSpec(mode=mode, predictions=predictions)

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    if mode == estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(0.001)
        train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        return estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])}
    return estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
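A minimal driver for this model_fn, not from the original source: it assumes a TF1-style runtime (matching the snippet's use of tf.layers), the standard Keras MNIST loader, and the v1 numpy_input_fn; the literals stand in for whatever configuration the original used.

    import numpy as np
    import tensorflow as tf
    from tensorflow import estimator

    (train_x, train_y), _ = tf.keras.datasets.mnist.load_data()
    train_x = train_x.reshape(-1, 784).astype(np.float32) / 255.0
    train_y = train_y.astype(np.int32)

    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
        x={'x': train_x}, y=train_y, batch_size=100, num_epochs=None,
        shuffle=True)

    classifier = estimator.Estimator(model_fn=cnn_model_fn)
    classifier.train(input_fn=train_input_fn, steps=1000)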
Example #23
        def linear_model_fn(features, labels, mode):
            preds = tf.keras.layers.Dense(1, activation='linear',
                                          name='dense')(features['x'])

            vector_loss = tf.math.squared_difference(labels, preds)
            scalar_loss = tf.reduce_mean(input_tensor=vector_loss)
            optimizer = VectorizedDPSGD(l2_norm_clip=1.0,
                                        noise_multiplier=0.,
                                        num_microbatches=1,
                                        learning_rate=1.0)
            global_step = tf.compat.v1.train.get_global_step()
            train_op = optimizer.minimize(loss=vector_loss,
                                          global_step=global_step)
            return tf_estimator.EstimatorSpec(mode=mode,
                                              loss=scalar_loss,
                                              train_op=train_op)
Example #24
    def eval_spec(self, loss, labels):
        g = tf.get_default_graph()
        # Pairwise-distance tensor recorded earlier in the graph.
        D = g.get_tensor_by_name(PAIRWISE_DISTANCES + ':0')
        # Negate so top_k picks the *nearest* neighbors, then take the second
        # hit to skip each point's zero-distance match with itself.
        D *= -1
        _, top_1 = tf.nn.top_k(D, 2)
        top_1 = top_1[:, 1]
        estimated = tf.gather_nd(labels, top_1[:, None])

        # Define the metrics:
        metrics_dict = {
            'Map@1': tf.metrics.accuracy(labels, estimated)}

        # return eval spec
        return estimator.EstimatorSpec(
            estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops=metrics_dict)
Example #25
  def model_fn(features, labels, mode, params):
    """Returns the model function."""
    feature = features['feature']
    labels = labels['label']
    one_hot_labels = model_utils.get_label(
        labels,
        params,
        FLAGS.src_num_classes,
        batch_size=FLAGS.train_batch_size)

    def get_logits():
      """Return the logits."""
      network_output = model.conv_model(
          feature,
          mode,
          target_dataset=FLAGS.target_dataset,
          src_hw=FLAGS.src_hw,
          target_hw=FLAGS.target_hw)
      name = FLAGS.cls_dense_name
      with tf.variable_scope('target_CLS'):
        logits = tf.layers.dense(
            inputs=network_output, units=FLAGS.src_num_classes, name=name)
      return logits

    logits = get_logits()
    logits = tf.cast(logits, tf.float32)

    dst_loss = tf.losses.softmax_cross_entropy(
        logits=logits,
        onehot_labels=one_hot_labels,
    )
    loss = dst_loss

    eval_metrics = model_utils.metric_fn(labels, logits)

    return tf_estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=None,
        eval_metric_ops=eval_metrics,
    )
Example #26
        def linear_model_fn(features, labels, mode):
            layer = tf.keras.layers.Dense(1,
                                          activation='linear',
                                          name='dense',
                                          kernel_initializer='zeros',
                                          bias_initializer='zeros')
            preds = layer(features)

            vector_loss = 0.5 * tf.math.squared_difference(labels, preds)
            scalar_loss = tf.reduce_mean(input_tensor=vector_loss)

            optimizer = opt_cls(l2_norm_clip=l2_norm_clip,
                                noise_multiplier=noise_multiplier,
                                num_microbatches=num_microbatches,
                                learning_rate=learning_rate)

            params = layer.trainable_weights
            global_step = tf.compat.v1.train.get_global_step()
            # Keras-style optimizers expose get_updates() rather than
            # minimize(), and get_updates() does not touch the global step,
            # so increment it explicitly alongside the update ops.
            train_op = tf.group(
                optimizer.get_updates(loss=vector_loss, params=params),
                [tf.compat.v1.assign_add(global_step, 1)])
            return tf_estimator.EstimatorSpec(mode=mode,
                                              loss=scalar_loss,
                                              train_op=train_op)
Example #27
def rnn_model_fn(features, labels, mode, params):
    """RNN tf.estimator.Estimator model_fn definition.

  Args:
    features: ({str: Tensor}) The feature tensors provided by the input_fn.
    labels: (Tensor) The labels tensor provided by the input_fn.
    mode: (tf.estimator.ModeKeys) The invocation mode of the model.
    params: (dict) Model configuration parameters.
  Returns:
    (tf.estimator.EstimatorSpec) Model specification.
  """
    # Support both dict-based and HParams-based params.
    if not isinstance(params, dict):
        params = params.values()

    logits_train = build_rnn_inference_subgraph(features,
                                                reuse=False,
                                                params=params)
    logits_test = build_rnn_inference_subgraph(features,
                                               reuse=True,
                                               params=params)

    pred_labels = tf.argmax(logits_test, axis=1)
    pred_probas = tf.nn.softmax(logits_test)

    if mode == tf_estimator.ModeKeys.PREDICT:
        return tf_estimator.EstimatorSpec(
            mode=mode,
            predictions={
                'label': pred_labels,
                'proba': pred_probas,
            },
        )

    # Note: labels=None when mode==PREDICT (see tf.estimator API).
    one_hot_labels = tf.one_hot(labels, params['num_classes'])

    if mode == tf_estimator.ModeKeys.TRAIN:
        loss_train = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits_train,
                                                    labels=one_hot_labels))
        tf.summary.scalar('loss_train', loss_train)

        optimizer = tf.train.RMSPropOptimizer(
            learning_rate=params['learning_rate'])
        train_op = optimizer.minimize(loss_train,
                                      global_step=tf.train.get_global_step())

        return tf_estimator.EstimatorSpec(
            mode=mode,
            train_op=train_op,
            loss=loss_train,
        )

    accuracy = tf.metrics.accuracy(labels=labels, predictions=pred_labels)
    precision = tf.metrics.precision(labels=labels, predictions=pred_labels)
    recall = tf.metrics.recall(labels=labels, predictions=pred_labels)
    loss_test = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits_test,
                                                labels=one_hot_labels))
    tf.summary.scalar('loss_test', loss_test)

    return tf_estimator.EstimatorSpec(mode=mode,
                                      loss=loss_test,
                                      eval_metric_ops={
                                          'accuracy': accuracy,
                                          'precision': precision,
                                          'recall': recall,
                                      })
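`build_rnn_inference_subgraph` is referenced but not shown; the reuse=False/reuse=True pair indicates the classic variable_scope sharing idiom, building one subgraph for training and a second one over the same weights for inference. A hypothetical sketch of such a helper (feature key and architecture are illustrative only):

    import tensorflow as tf

    def build_rnn_inference_subgraph(features, reuse, params):
        # reuse=False creates the variables; reuse=True binds to them again.
        with tf.compat.v1.variable_scope('rnn', reuse=reuse):
            x = tf.one_hot(features['tokens'], depth=params['vocab_size'])
            cell = tf.compat.v1.nn.rnn_cell.LSTMCell(params['hidden_units'])
            outputs, _ = tf.compat.v1.nn.dynamic_rnn(cell, x, dtype=tf.float32)
            # Classify from the last timestep's output.
            return tf.compat.v1.layers.dense(outputs[:, -1, :],
                                             params['num_classes'])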
Example #28
    def model_fn(features, labels, mode, params):
        """Returns the model function."""
        feature = features['feature']
        labels = labels['label']
        one_hot_labels = model_utils.get_label(
            labels,
            params,
            FLAGS.src_num_classes,
            batch_size=FLAGS.train_batch_size)

        def get_logits():
            """Return the logits."""
            avg_pool = model.conv_model(feature,
                                        mode,
                                        target_dataset=FLAGS.target_dataset,
                                        src_hw=FLAGS.src_hw,
                                        target_hw=FLAGS.target_hw)
            name = 'final_dense_dst'
            with tf.variable_scope('target_CLS'):
                logits = tf.layers.dense(
                    inputs=avg_pool,
                    units=FLAGS.src_num_classes,
                    name=name,
                    kernel_initializer=tf.random_normal_initializer(
                        stddev=.05),
                )
            return logits

        logits = get_logits()
        logits = tf.cast(logits, tf.float32)

        dst_loss = tf.losses.softmax_cross_entropy(
            logits=logits,
            onehot_labels=one_hot_labels,
        )
        dst_l2_loss = FLAGS.weight_decay * tf.add_n([
            tf.nn.l2_loss(v) for v in tf.trainable_variables()
            if 'batch_normalization' not in v.name and 'kernel' in v.name
        ])

        loss = dst_loss + dst_l2_loss

        train_op = None
        if mode == tf_estimator.ModeKeys.TRAIN:
            cur_finetune_step = tf.train.get_global_step()
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                finetune_learning_rate = lr_schedule()
                optimizer = tf.train.MomentumOptimizer(
                    learning_rate=finetune_learning_rate,
                    momentum=0.9,
                    use_nesterov=True)
                with tf.variable_scope('finetune'):
                    train_op = optimizer.minimize(loss, cur_finetune_step)
            with tf.control_dependencies([train_op]):
                tf.summary.scalar('classifier/finetune_lr',
                                  finetune_learning_rate)

        eval_metrics = None
        if mode == tf_estimator.ModeKeys.EVAL:
            eval_metrics = model_utils.metric_fn(labels, logits)

        return tf_estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metrics,
        )
Example #29
def resnet_model_fn(features, labels, mode, params):
    """The model_fn for ResNet-50.

  Args:
    features: A dictionary with different features
    labels: An int32 batch of labels.
    mode: Specifies whether training or evaluation.
    params: Dictionary of parameters passed to the model.

  Returns:
    An EstimatorSpec for the model.
  """

    images = features['images_batch']
    labels = tf.reshape(features['labels_batch'], [-1])
    if params['dataset'] == 'imagenet':
        images -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=images.dtype)
        images /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=images.dtype)

    def build_network():
        network = resnet_model.resnet_v1_(
            resnet_depth=params['resnet_depth'],
            num_classes=params['num_label_classes'],
            data_format=FLAGS.data_format)
        return network(inputs=images,
                       is_training=(mode == tf_estimator.ModeKeys.TRAIN))

    logits = build_network()

    output_dir = params['output_dir']  # pylint: disable=unused-variable
    # Calculate loss, which includes softmax cross entropy and L2 regularization.
    one_hot_labels = tf.one_hot(labels, params['num_label_classes'])

    cross_entropy = tf.compat.v1.losses.softmax_cross_entropy(
        logits=logits,
        onehot_labels=one_hot_labels,
        label_smoothing=FLAGS.label_smoothing)

    reg_loss = 0.0
    if mode == tf_estimator.ModeKeys.TRAIN:
        if params['regularize_gradients']:
            ## if regularize_aux evaluate perceptual quality at earlier layer
            one_hot_labels = tf.one_hot(labels, params['num_label_classes'])
            reg_loss = reg.compute_reg_loss(params['regularizer'], logits,
                                            images, one_hot_labels)
            reg_loss *= params['reg_scale']

    # Add weight decay to the loss for non-batch-normalization variables.
    # Add the regularizer to optimize for gradient heatmap with higher
    # perceptual quality.
    loss = cross_entropy + reg_loss + FLAGS.weight_decay * tf.add_n([
        tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables()
        if 'batch_normalization' not in v.name
    ])
    global_step = tf.compat.v1.train.get_global_step()
    if mode == tf_estimator.ModeKeys.TRAIN:
        train_op = train_function(loss, params, global_step)
        tf.summary.scalar('reg_loss', reg_loss, step=global_step)
        tf.summary.scalar('cross_entropy', cross_entropy, step=global_step)
    else:
        train_op = None

    eval_metrics = None
    if mode == tf_estimator.ModeKeys.EVAL:
        # EstimatorSpec expects the metrics dict itself; the (fn, args) tuple
        # form is only for TPUEstimatorSpec's eval_metrics argument.
        eval_metrics = create_eval_metrics(labels, logits)

    return tf_estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=eval_metrics)
Example #30
def nn_model_fn(features, labels, mode):
    """NN adapted from github.com/hexiangnan/neural_collaborative_filtering."""
    n_latent_factors_user = 10
    n_latent_factors_movie = 10
    n_latent_factors_mf = 5

    user_input = tf.reshape(features['user'], [-1, 1])
    item_input = tf.reshape(features['movie'], [-1, 1])

    # number of users: 6040; number of movies: 3706
    mf_embedding_user = tf.keras.layers.Embedding(6040,
                                                  n_latent_factors_mf,
                                                  input_length=1)
    mf_embedding_item = tf.keras.layers.Embedding(3706,
                                                  n_latent_factors_mf,
                                                  input_length=1)
    mlp_embedding_user = tf.keras.layers.Embedding(6040,
                                                   n_latent_factors_user,
                                                   input_length=1)
    mlp_embedding_item = tf.keras.layers.Embedding(3706,
                                                   n_latent_factors_movie,
                                                   input_length=1)

    # GMF part
    # Flatten the embedding vector as latent features in GMF
    mf_user_latent = tf.keras.layers.Flatten()(mf_embedding_user(user_input))
    mf_item_latent = tf.keras.layers.Flatten()(mf_embedding_item(item_input))
    # Element-wise multiply
    mf_vector = tf.keras.layers.multiply([mf_user_latent, mf_item_latent])

    # MLP part
    # Flatten the embedding vector as latent features in MLP
    mlp_user_latent = tf.keras.layers.Flatten()(mlp_embedding_user(user_input))
    mlp_item_latent = tf.keras.layers.Flatten()(mlp_embedding_item(item_input))
    # Concatenation of two latent features
    mlp_vector = tf.keras.layers.concatenate(
        [mlp_user_latent, mlp_item_latent])

    predict_vector = tf.keras.layers.concatenate([mf_vector, mlp_vector])

    logits = tf.keras.layers.Dense(5)(predict_vector)

    # Calculate loss as a vector (to support microbatches in DP-SGD).
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=logits)
    # Define mean of loss across minibatch (for reporting through tf.Estimator).
    scalar_loss = tf.reduce_mean(vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf_estimator.ModeKeys.TRAIN:
        if FLAGS.dpsgd:
            # Use DP version of GradientDescentOptimizer. Other optimizers are
            # available in dp_optimizer. Most optimizers inheriting from
            # tf.compat.v1.train.Optimizer should be wrappable in differentially
            # private counterparts by calling dp_optimizer.optimizer_from_args().
            optimizer = dp_optimizer.DPAdamGaussianOptimizer(
                l2_norm_clip=FLAGS.l2_norm_clip,
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=microbatches,
                learning_rate=FLAGS.learning_rate)
            opt_loss = vector_loss
        else:
            optimizer = tf.compat.v1.train.AdamOptimizer(
                learning_rate=FLAGS.learning_rate)
            opt_loss = scalar_loss

        global_step = tf.compat.v1.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
        # In the following, we pass the mean of the loss (scalar_loss) rather than
        # the vector_loss because tf.estimator requires a scalar loss. This is only
        # used for evaluation and debugging by tf.estimator. The actual loss being
        # minimized is opt_loss defined above and passed to optimizer.minimize().
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    if mode == tf_estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'rmse':
            tf.compat.v1.metrics.root_mean_squared_error(
                labels=tf.cast(labels, tf.float32),
                predictions=tf.tensordot(a=tf.nn.softmax(logits, axis=1),
                                         b=tf.constant(np.array(
                                             [0, 1, 2, 3, 4]),
                                                       dtype=tf.float32),
                                         axes=1))
        }
        return tf_estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
    return None