Example 1

A unit test verifying that dp_optimizer.make_optimizer_class() logs a warning when the class it wraps already overrides compute_gradients().
import unittest
from unittest import mock

import tensorflow as tf
from tensorflow_privacy.privacy.optimizers import dp_optimizer


class DPOptimizerTest(unittest.TestCase):
    # dp_optimizer logs through absl.logging; patch it to capture the call.
    @mock.patch('absl.logging.warning')
    def testComputeGradientsOverrideWarning(self, mock_logging):
        class SimpleOptimizer(tf.train.Optimizer):
            # Overriding compute_gradients() is what triggers the warning.
            def compute_gradients(self):
                return 0

        dp_optimizer.make_optimizer_class(SimpleOptimizer)
        mock_logging.warning.assert_called_once_with(
            'WARNING: Calling make_optimizer_class() on class %s that overrides '
            'method compute_gradients(). Check to ensure that '
            'make_optimizer_class() does not interfere with overridden version.',
            'SimpleOptimizer')
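
What the test exercises: make_optimizer_class() itself overrides compute_gradients() on the class it returns, so it warns when the wrapped class has already replaced that method. Below is a minimal self-contained sketch of that pattern; the base class, warning text, and placeholder DP logic are illustrative assumptions, not the library's actual implementation.

import logging


class BaseOptimizer:
    """Stand-in for the optimizer base class."""

    def compute_gradients(self, loss):
        return []


def make_optimizer_class(cls):
    """Returns a DP subclass of `cls`; warns if compute_gradients is overridden."""
    # An override is detectable by comparing code objects: if `cls` merely
    # inherits the method, the lookup finds BaseOptimizer's function unchanged.
    if cls.compute_gradients.__code__ is not BaseOptimizer.compute_gradients.__code__:
        logging.warning(
            'Calling make_optimizer_class() on class %s that overrides '
            'method compute_gradients().', cls.__name__)

    class DPOptimizerClass(cls):
        def compute_gradients(self, loss):
            # Per-microbatch gradient clipping and noise addition would go here.
            return super().compute_gradients(loss)

    return DPOptimizerClass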
Example 2

The model function from the DP-SGD MNIST tutorial: a small CNN trained through tf.estimator, switching between a differentially private and a vanilla GradientDescentOptimizer based on FLAGS.dpsgd.
# Imports assumed by this excerpt (paths as in the tensorflow_privacy package);
# the absl flags used below are defined elsewhere in the original script.
import tensorflow as tf

from absl import flags
from tensorflow_privacy.privacy.optimizers import dp_optimizer

FLAGS = flags.FLAGS


def cnn_model_fn(features, labels, mode):
    """Model function for a CNN."""

    # Define CNN architecture using tf.keras.layers.
    input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
    y = tf.keras.layers.Conv2D(
        16, 8, strides=2, padding='same',
        kernel_initializer='he_normal').apply(input_layer)
    y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
    y = tf.keras.layers.Conv2D(32,
                               4,
                               strides=2,
                               padding='valid',
                               kernel_initializer='he_normal').apply(y)
    y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
    y = tf.keras.layers.Flatten().apply(y)
    y = tf.keras.layers.Dense(32, kernel_initializer='he_normal').apply(y)
    logits = tf.keras.layers.Dense(10, kernel_initializer='he_normal').apply(y)

    # Calculate loss as a vector (to support microbatches in DP-SGD).
    vector_loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels,
                                                             logits=logits)
    # Define mean of loss across minibatch (for reporting through tf.Estimator).
    scalar_loss = tf.reduce_mean(vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf.estimator.ModeKeys.TRAIN:

        if FLAGS.dpsgd:
            # Use DP version of GradientDescentOptimizer. For illustration purposes,
            # we do that here by calling make_optimizer_class() explicitly, though DP
            # versions of standard optimizers are available in dp_optimizer.
            dp_optimizer_class = dp_optimizer.make_optimizer_class(
                tf.train.GradientDescentOptimizer)
            optimizer = dp_optimizer_class(
                learning_rate=FLAGS.learning_rate,
                noise_multiplier=FLAGS.noise_multiplier,
                l2_norm_clip=FLAGS.l2_norm_clip,
                num_microbatches=FLAGS.microbatches)
        else:
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=FLAGS.learning_rate)
        # DP-SGD needs the per-example vector loss so it can clip and noise
        # gradients per microbatch; plain SGD minimizes the scalar mean loss.
        opt_loss = vector_loss if FLAGS.dpsgd else scalar_loss
        global_step = tf.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss,
                                      global_step=global_step)
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.metrics.accuracy(labels=tf.argmax(labels, axis=1),
                                predictions=tf.argmax(input=logits, axis=1))
        }
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
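
To make the example self-contained, here is a minimal sketch of how such a model function is typically wired into tf.estimator and trained. The flag defaults, model_dir, batch size, and input pipeline are illustrative assumptions, not part of the original example.

from absl import app, flags
import tensorflow as tf

flags.DEFINE_boolean('dpsgd', True, 'Train with DP-SGD if True, vanilla SGD otherwise.')
flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training.')
flags.DEFINE_float('noise_multiplier', 1.1, 'Ratio of noise stddev to l2_norm_clip.')
flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm for per-example gradients.')
flags.DEFINE_integer('microbatches', 16, 'Number of microbatches; must divide the batch size.')
FLAGS = flags.FLAGS


def main(unused_argv):
    # MNIST pixels scaled to [0, 1]; labels one-hot encoded because the model
    # function applies softmax_cross_entropy_with_logits_v2 and argmax to them.
    (train_x, train_y), _ = tf.keras.datasets.mnist.load_data()
    train_x = (train_x / 255.0).astype('float32')
    train_y = tf.keras.utils.to_categorical(train_y, 10)

    classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
                                        model_dir='/tmp/mnist_dpsgd')
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': train_x},
        y=train_y,
        batch_size=256,  # FLAGS.microbatches must divide this evenly.
        num_epochs=None,
        shuffle=True)
    classifier.train(input_fn=train_input_fn, steps=1000)


if __name__ == '__main__':
    app.run(main)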