def lr_model_fn(features, labels, mode, nclasses, dim):
    """Model function for logistic regression."""
    input_layer = tf.reshape(features['x'], tuple([-1]) + dim)
    logits = tf.layers.dense(
        inputs=input_layer,
        units=nclasses,
        kernel_regularizer=tf.contrib.layers.l2_regularizer(
            scale=FLAGS.regularizer),
        bias_regularizer=tf.contrib.layers.l2_regularizer(
            scale=FLAGS.regularizer))

    # Calculate loss as a vector (to support microbatches in DP-SGD).
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits) + tf.losses.get_regularization_loss()
    # Define mean of loss across minibatch (for reporting through tf.Estimator).
    scalar_loss = tf.reduce_mean(vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf.estimator.ModeKeys.TRAIN:
        if FLAGS.dpsgd:
            # The loss function is L-Lipschitz with L = sqrt(2*(||x||^2 + 1)) where
            # ||x|| is the norm of the data.
            # We don't use microbatches (thus speeding up computation), since no
            # clipping is necessary due to data normalization.
            optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
                l2_norm_clip=math.sqrt(2 * (FLAGS.data_l2_norm**2 + 1)),
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=1,
                learning_rate=FLAGS.learning_rate)
            opt_loss = vector_loss
        else:
            optimizer = GradientDescentOptimizer(
                learning_rate=FLAGS.learning_rate)
            opt_loss = scalar_loss
        global_step = tf.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
        # In the following, we pass the mean of the loss (scalar_loss) rather than
        # the vector_loss because tf.estimator requires a scalar loss. This is only
        # used for evaluation and debugging by tf.estimator. The actual loss being
        # minimized is opt_loss defined above and passed to optimizer.minimize().
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf.estimator.ModeKeys.EVAL:
        pred_classes = tf.argmax(logits, axis=1)
        acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
        recall_op = tf.metrics.recall(labels=labels, predictions=pred_classes)
        precision_op = tf.metrics.precision(labels=labels,
                                            predictions=pred_classes)
        # Note: tf.metrics.auc expects scores in [0, 1]; passing argmax class
        # ids is only meaningful when the labels are binary.
        auc_op = tf.metrics.auc(labels=labels, predictions=pred_classes)
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops={
                                              'recall': recall_op,
                                              'accuracy': acc_op,
                                              'precision': precision_op,
                                              'auc': auc_op
                                          })
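
The model functions in these examples are meant to be handed to a tf.estimator.Estimator. A minimal wiring sketch for lr_model_fn follows; functools.partial, the model_dir, the flag values and the train_data/train_labels arrays are illustrative assumptions, not part of the snippet above.

import functools
import tensorflow as tf

# Bind the extra lr_model_fn arguments so the Estimator sees the usual
# (features, labels, mode) signature.
model_fn = functools.partial(lr_model_fn, nclasses=10, dim=(784,))
estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir='/tmp/lr_dpsgd')

# train_data is assumed to be L2-normalized to FLAGS.data_l2_norm, matching the
# clipping norm computed in the model function.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': train_data},
    y=train_labels,
    batch_size=FLAGS.batch_size,
    num_epochs=FLAGS.epochs,
    shuffle=True)
estimator.train(input_fn=train_input_fn)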
Example #2
    def define_training_procedure(self, parameters):
        # Define training procedure
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        if parameters['optimizer'] == 'adam':
            self.optimizer = tf.train.AdamOptimizer(parameters['learning_rate'])
        elif parameters['optimizer'] == 'sgd':
#            self.optimizer = tf.train.GradientDescentOptimizer(parameters['learning_rate'])
            # Alteration to make training differentially private. Note that
            # DPGradientDescentGaussianOptimizer accepts no population_size
            # argument; the population size belongs in a PrivacyLedger instead.
            self.optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
                  l2_norm_clip=1.0,
                  noise_multiplier=1.1,
                  num_microbatches=37114,
                  learning_rate=parameters['learning_rate'])
#            training_hooks = [
#               EpsilonPrintingTrainingHook(ledger)
#            ]
#            opt_loss = vector_loss
###

        elif parameters['optimizer'] == 'adadelta':
            self.optimizer = tf.train.AdadeltaOptimizer(parameters['learning_rate'])
        else:
            raise ValueError("The optimizer parameter must be either 'adadelta', 'adam' or 'sgd'.")

#        grads_and_vars = self.optimizer.compute_gradients(self.loss)
        # Alteration to make it private: minimize the per-example (vector) loss
        # so the DP optimizer can clip and noise per-microbatch gradients.
        self.train_op = self.optimizer.minimize(loss=self.vector_loss, global_step=self.global_step)
        """
Example #3
    def define_training_procedure(self, parameters):
        # Define training procedure
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        if parameters['optimizer'] == 'adam':
            self.optimizer = tf.train.AdamOptimizer(parameters['learning_rate'])
        elif parameters['optimizer'] == 'sgd':
#            self.optimizer = tf.train.GradientDescentOptimizer(parameters['learning_rate'])
            # Alteration to make training differentially private.
            self.optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
                  l2_norm_clip=1.0,  # clipping norm
                  noise_multiplier=1.1,
                  num_microbatches=256,
#                  ledger=ledger,
                  learning_rate=parameters['learning_rate'])
#            training_hooks = [
#               EpsilonPrintingTrainingHook(ledger)
#            ]
#            opt_loss = vector_loss
###
        elif parameters['optimizer'] == 'adadelta':
            self.optimizer = tf.train.AdadeltaOptimizer(parameters['learning_rate'])
        else:
            raise ValueError("The optimizer parameter must be either 'adadelta', 'adam' or 'sgd'.")

        # Alteration to make it private: compute per-example gradients from the
        # vector loss instead of the scalar loss.
        grads_and_vars = self.optimizer.compute_gradients(self.vector_loss)
#        grads_and_vars = self.optimizer.compute_gradients(self.loss)
        if parameters['gradient_clipping_value']:
            grads_and_vars = [(tf.clip_by_value(grad, -parameters['gradient_clipping_value'], parameters['gradient_clipping_value']), var) 
                              for grad, var in grads_and_vars]
        # By defining a global_step variable and passing it to the optimizer we allow TensorFlow handle the counting of training steps for us.
        # The global step will be automatically incremented by one every time you execute train_op.
        self.train_op = self.optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)
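
In both of the snippets above, the DP optimizer is handed the per-example vector loss: internally it reshapes that loss to [num_microbatches, -1], computes one gradient per microbatch, clips each to l2_norm_clip, and adds Gaussian noise to their sum. A minimal NumPy-only illustration of that clip-and-noise step (the batch shape and hyperparameter values are arbitrary assumptions):

import numpy as np

l2_norm_clip = 1.0
noise_multiplier = 1.1
per_microbatch_grads = np.random.randn(4, 10)  # 4 microbatches, 10 parameters

# Clip each microbatch gradient to an L2 norm of at most l2_norm_clip.
norms = np.linalg.norm(per_microbatch_grads, axis=1, keepdims=True)
clipped = per_microbatch_grads / np.maximum(1.0, norms / l2_norm_clip)

# Sum, add Gaussian noise with std noise_multiplier * l2_norm_clip, average.
noisy_sum = clipped.sum(axis=0) + np.random.normal(
    scale=noise_multiplier * l2_norm_clip, size=10)
dp_grad = noisy_sum / len(clipped)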
Example #4
def cnn_model_fn(features, labels, mode):
  """Model function for a CNN."""

  # Define CNN architecture using tf.keras.layers.
  input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
  y = tf.keras.layers.Conv2D(16, 8,
                             strides=2,
                             padding='same').apply(input_layer)
  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
  y = tf.keras.layers.Conv2D(32, 4, strides=2, padding='valid').apply(y)
  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
  y = tf.keras.layers.Flatten().apply(y)
  y = tf.keras.layers.Dense(32).apply(y)
  logits = tf.keras.layers.Dense(10).apply(y)

  # Calculate loss as a vector (to support microbatches in DP-SGD).
  vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  # Define mean of loss across minibatch (for reporting through tf.Estimator).
  scalar_loss = tf.reduce_mean(vector_loss)

  # Configure the training op (for TRAIN mode).
  if mode == tf.estimator.ModeKeys.TRAIN:

    if FLAGS.dpsgd:
      # Use the DP version of GradientDescentOptimizer. DP counterparts of the
      # other standard optimizers (e.g. Adam, Adagrad) are also available in
      # dp_optimizer.
      optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
          l2_norm_clip=FLAGS.l2_norm_clip,
          noise_multiplier=FLAGS.noise_multiplier,
          num_microbatches=FLAGS.microbatches,
          learning_rate=FLAGS.learning_rate)
      opt_loss = vector_loss
    else:
      optimizer = tf.train.GradientDescentOptimizer(
          learning_rate=FLAGS.learning_rate)
      opt_loss = scalar_loss
    global_step = tf.train.get_global_step()
    train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
    # In the following, we pass the mean of the loss (scalar_loss) rather than
    # the vector_loss because tf.estimator requires a scalar loss. This is only
    # used for evaluation and debugging by tf.estimator. The actual loss being
    # minimized is opt_loss defined above and passed to optimizer.minimize().
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=scalar_loss,
                                      train_op=train_op)

  # Add evaluation metrics (for EVAL mode).
  elif mode == tf.estimator.ModeKeys.EVAL:
    eval_metric_ops = {
        'accuracy':
            tf.metrics.accuracy(
                labels=labels,
                predictions=tf.argmax(input=logits, axis=1))
    }
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=scalar_loss,
                                      eval_metric_ops=eval_metric_ops)
Example #5
def lr_model_fn(features, labels, mode, nclasses, dim):
    """Model function for logistic regression."""
    input_layer = tf.reshape(features['x'], tuple([-1]) + dim)

    logits = tf.layers.dense(
        inputs=input_layer,
        units=nclasses,
        kernel_regularizer=tf.contrib.layers.l2_regularizer(
            scale=FLAGS.regularizer),
        bias_regularizer=tf.contrib.layers.l2_regularizer(
            scale=FLAGS.regularizer))

    # Calculate loss as a vector (to support microbatches in DP-SGD).
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits) + tf.losses.get_regularization_loss()
    # Define mean of loss across minibatch (for reporting through tf.Estimator).
    scalar_loss = tf.reduce_mean(vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf.estimator.ModeKeys.TRAIN:

        if FLAGS.dpsgd:
            # Use DP version of GradientDescentOptimizer. Other optimizers are
            # available in dp_optimizer. Most optimizers inheriting from
            # tf.train.Optimizer should be wrappable in differentially private
            # counterparts by calling dp_optimizer.optimizer_from_args().
            # The loss function is L-Lipschitz with L = sqrt(2*(||x||^2 + 1)) where
            # ||x|| is the norm of the data.
            optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
                l2_norm_clip=math.sqrt(2 * (FLAGS.data_l2_norm**2 + 1)),
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=FLAGS.microbatches,
                learning_rate=FLAGS.learning_rate)
            opt_loss = vector_loss
        else:
            optimizer = GradientDescentOptimizer(
                learning_rate=FLAGS.learning_rate)
            opt_loss = scalar_loss
        global_step = tf.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
        # In the following, we pass the mean of the loss (scalar_loss) rather than
        # the vector_loss because tf.estimator requires a scalar loss. This is only
        # used for evaluation and debugging by tf.estimator. The actual loss being
        # minimized is opt_loss defined above and passed to optimizer.minimize().
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.metrics.accuracy(labels=labels,
                                predictions=tf.argmax(input=logits, axis=1))
        }
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
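
The comment in the TRAIN branch above refers to dp_optimizer.optimizer_from_args(); in the tensorflow_privacy releases these snippets otherwise use, the wrapping helper is dp_optimizer.make_gaussian_optimizer_class, which is how prebuilt classes such as the DPAdamGaussianOptimizer used in Example #7 are defined. A hedged sketch; the hyperparameter values are placeholders.

import tensorflow as tf
from tensorflow_privacy.privacy.optimizers import dp_optimizer

# Build a DP variant of AdamOptimizer: per-microbatch gradients are clipped to
# l2_norm_clip and Gaussian noise is added before the Adam update is applied.
DPAdamOptimizer = dp_optimizer.make_gaussian_optimizer_class(tf.train.AdamOptimizer)
optimizer = DPAdamOptimizer(
    l2_norm_clip=1.0,
    noise_multiplier=1.1,
    num_microbatches=FLAGS.microbatches,
    learning_rate=FLAGS.learning_rate)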
Example #6
def model_fn(features, labels, mode):
	logits = linear_layer(features)

	# Vector loss: each component corresponds to an individual training point
	# and label; used later for the per-example gradients in DP-SGD.
	vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
		logits=logits,
		labels=tf.cast(labels, dtype=tf.int64))  # labels cast to int64 (unlike the MNIST example)


	scalar_loss = tf.reduce_mean(vector_loss)
	print('*******************')
	print(vector_loss.dtype)
	print(scalar_loss.dtype)
	if mode == tf.estimator.ModeKeys.TRAIN:

		if FLAGS.dpsgd:
			ledger = privacy_ledger.PrivacyLedger(
				population_size=60000,
				selection_probability=(FLAGS.batch_size / 60000))

			optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
				l2_norm_clip=FLAGS.l2_norm_clip,
				noise_multiplier=FLAGS.noise_multiplier,
				num_microbatches=FLAGS.microbatches,
				ledger=ledger,
				learning_rate=FLAGS.learning_rate)
			training_hooks = [
				EpsilonPrintingTrainingHook(ledger)
			]
			opt_loss = vector_loss
		else:
			optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
			#train_op  = optimizer.minimize(scalar_loss,
			#	global_step=tf.train.get_global_step())
			opt_loss = scalar_loss
			training_hooks = []
		global_step = tf.train.get_global_step()		
		train_op = optimizer.minimize(loss=opt_loss,
			global_step=global_step)
		return tf.estimator.EstimatorSpec(mode=mode,
			loss=scalar_loss,
			train_op=train_op,
			training_hooks=training_hooks)
	elif mode == tf.estimator.ModeKeys.EVAL:
		# pred_probas  = tf.nn.softmax(logits) # should I remove this ?
		pred_classes = tf.argmax(logits, axis=1)
		acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
		return tf.estimator.EstimatorSpec(mode=mode,
			loss=scalar_loss,
			eval_metric_ops={'accuracy':acc_op})
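
Example #6 uses EpsilonPrintingTrainingHook and privacy_ledger without defining or importing them. A sketch of such a hook, modeled on the TF Privacy MNIST tutorial and assuming the rdp_accountant and privacy_ledger imports below; it reports epsilon at a fixed delta of 1e-5 whenever a training session ends.

import tensorflow as tf
from tensorflow_privacy.privacy.analysis import privacy_ledger
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp_from_ledger
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent

class EpsilonPrintingTrainingHook(tf.estimator.SessionRunHook):
  """Training hook that prints the current epsilon when a session ends."""

  def __init__(self, ledger):
    self._samples, self._queries = ledger.get_unformatted_ledger()

  def end(self, session):
    # RDP orders over which the accountant optimizes the epsilon bound.
    orders = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))
    samples = session.run(self._samples)
    queries = session.run(self._queries)
    formatted_ledger = privacy_ledger.format_ledger(samples, queries)
    rdp = compute_rdp_from_ledger(formatted_ledger, orders)
    eps = get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
    print('For delta=1e-5, the current epsilon is: %.2f' % eps)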
Example #7
def cnn_model_fn(features, labels):
    """Model function for a CNN."""

    # Define CNN architecture using tf.keras.layers.
    if FLAGS.dataset == "mnist":
        input_layer = tf.reshape(features, [-1, 28, 28, 1])
    elif FLAGS.dataset == "cifar10":
        input_layer = features
        # input_layer = tf.reshape(features, [-1, 32, 32, 3])
    elif FLAGS.dataset == "svhn":
        input_layer = tf.reshape(features, [-1, 32, 32, 3])

    # y = tf.keras.layers.Conv2D(16, 8,
    #                            strides=2,
    #                            padding='same',
    #                            activation='relu').apply(input_layer)
    # y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
    # y = tf.keras.layers.Conv2D(32, 4,
    #                            strides=2,
    #                            padding='valid',
    #                            activation='relu').apply(y)
    # y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
    # y = tf.keras.layers.Flatten().apply(y)
    # y = tf.keras.layers.Dense(32, activation='relu').apply(y)

    if FLAGS.model == "trival":
        logits = trival(input_layer=input_layer)
    elif FLAGS.model == "deep":
        logits = deep(input_layer=input_layer)
        # input_layer = tf.reshape(features, [-1, 32, 32, 3])
    elif FLAGS.model == "letnet":
        logits = trival(input_layer=input_layer)

    # Calculate accuracy.
    correct_pred = tf.equal(tf.argmax(logits, 1), labels)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Calculate loss as a vector (to support microbatches in DP-SGD).
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=logits)
    # Define mean of loss across minibatch (for reporting through tf.Estimator).
    scalar_loss = tf.reduce_mean(vector_loss)

    if FLAGS.dpsgd:
        ledger = privacy_ledger.PrivacyLedger(
            population_size=60000,
            selection_probability=(FLAGS.batch_size / 60000))

        # Use DP version of GradientDescentOptimizer. Other optimizers are
        # available in dp_optimizer. Most optimizers inheriting from
        # tf.train.Optimizer should be wrappable in differentially private
        # counterparts by calling dp_optimizer.optimizer_from_args().
        if FLAGS.method == 'sgd':
            optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
                l2_norm_clip=FLAGS.l2_norm_clip,
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=FLAGS.microbatches,
                ledger=ledger,
                learning_rate=FLAGS.learning_rate)
        elif FLAGS.method == 'adam':
            optimizer = dp_optimizer.DPAdamGaussianOptimizer(
                l2_norm_clip=FLAGS.l2_norm_clip,
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=FLAGS.microbatches,
                ledger=ledger,
                learning_rate=FLAGS.learning_rate,
                unroll_microbatches=True)
        elif FLAGS.method == 'adagrad':
            optimizer = dp_optimizer.DPAdagradGaussianOptimizer(
                l2_norm_clip=FLAGS.l2_norm_clip,
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=FLAGS.microbatches,
                ledger=ledger,
                learning_rate=FLAGS.learning_rate)
        elif FLAGS.method == 'momentum':
            optimizer = dp_optimizer.DPMomentumGaussianOptimizer(
                l2_norm_clip=FLAGS.l2_norm_clip,
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=FLAGS.microbatches,
                ledger=ledger,
                learning_rate=FLAGS.learning_rate,
                momentum=FLAGS.momentum,
                use_nesterov=FLAGS.use_nesterov)

        else:
            raise ValueError(
                'method must be sgd or adam or adagrad or momentum')
        opt_loss = vector_loss
    else:
        if FLAGS.method == 'sgd':
            optimizer = GradientDescentOptimizer(
                learning_rate=FLAGS.learning_rate)
        elif FLAGS.method == 'adam':
            optimizer = AdamOptimizer(learning_rate=FLAGS.learning_rate)
        elif FLAGS.method == 'adagrad':
            optimizer = AdagradOptimizer(learning_rate=FLAGS.learning_rate)
        elif FLAGS.method == 'momentum':
            optimizer = MomentumOptimizer(learning_rate=FLAGS.learning_rate,
                                          momentum=FLAGS.momentum,
                                          use_nesterov=FLAGS.use_nesterov)
        else:
            raise ValueError(
                'method must be sgd or adam or adagrad or momentum')
        opt_loss = scalar_loss
    global_step = tf.train.get_global_step()
    train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
    # In the following, we pass the mean of the loss (scalar_loss) rather than
    # the vector_loss because tf.estimator requires a scalar loss. This is only
    # used for evaluation and debugging by tf.estimator. The actual loss being
    # minimized is opt_loss defined above and passed to optimizer.minimize().
    return train_op, scalar_loss, accuracy
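
Example #7 samples batches from a population of 60000 with probability FLAGS.batch_size / 60000. The cumulative privacy cost after a given number of steps can be computed with the RDP accountant; a sketch with delta fixed at 1e-5 as an assumption.

from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent

def compute_epsilon(steps):
    """Epsilon for FLAGS.noise_multiplier after `steps` DP-SGD updates."""
    if FLAGS.noise_multiplier == 0.0:
        return float('inf')
    orders = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))
    sampling_probability = FLAGS.batch_size / 60000
    rdp = compute_rdp(q=sampling_probability,
                      noise_multiplier=FLAGS.noise_multiplier,
                      steps=steps,
                      orders=orders)
    # get_privacy_spent returns (epsilon, delta, optimal_order); keep epsilon.
    return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]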
Example #8
def cnn_model_fn(features, labels, mode):
  """Model function for a CNN."""

  # Define CNN architecture using tf.keras.layers.
  input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
  y = tf.keras.layers.Conv2D(16, 8,
                             strides=2,
                             padding='same',
                             activation='relu').apply(input_layer)
  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
  y = tf.keras.layers.Conv2D(32, 4,
                             strides=2,
                             padding='valid',
                             activation='relu').apply(y)
  y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
  y = tf.keras.layers.Flatten().apply(y)
  y = tf.keras.layers.Dense(32, activation='relu').apply(y)
  logits = tf.keras.layers.Dense(10).apply(y)

  # Calculate loss as a vector (to support microbatches in DP-SGD).
  vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  # Define mean of loss across minibatch (for reporting through tf.Estimator).
  scalar_loss = tf.reduce_mean(vector_loss)

  # Configure the training op (for TRAIN mode).
  if mode == tf.estimator.ModeKeys.TRAIN:

    if FLAGS.dpsgd:
      ledger = privacy_ledger.PrivacyLedger(
          population_size=60000,
          selection_probability=(FLAGS.batch_size / 60000))

      # Use DP version of GradientDescentOptimizer. Other optimizers are
      # available in dp_optimizer. Most optimizers inheriting from
      # tf.train.Optimizer should be wrappable in differentially private
      # counterparts by calling dp_optimizer.optimizer_from_args().
      optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
          l2_norm_clip=FLAGS.l2_norm_clip,
          noise_multiplier=FLAGS.noise_multiplier,
          num_microbatches=FLAGS.microbatches,
          ledger=ledger,
          learning_rate=FLAGS.learning_rate)
      training_hooks = [
          EpsilonPrintingTrainingHook(ledger)
      ]
      opt_loss = vector_loss
    else:
      optimizer = GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
      training_hooks = []
      opt_loss = scalar_loss
    global_step = tf.train.get_global_step()
    train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
    # In the following, we pass the mean of the loss (scalar_loss) rather than
    # the vector_loss because tf.estimator requires a scalar loss. This is only
    # used for evaluation and debugging by tf.estimator. The actual loss being
    # minimized is opt_loss defined above and passed to optimizer.minimize().
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=scalar_loss,
                                      train_op=train_op,
                                      training_hooks=training_hooks)

  # Add evaluation metrics (for EVAL mode).
  elif mode == tf.estimator.ModeKeys.EVAL:
    eval_metric_ops = {
        'accuracy':
            tf.metrics.accuracy(
                labels=labels,
                predictions=tf.argmax(input=logits, axis=1))
    }

    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=scalar_loss,
                                      eval_metric_ops=eval_metric_ops)
Example #9
def cnn_model_fn(features, labels, mode):
    """Model function for a CNN."""

    # Define CNN architecture using tf.keras.layers.
    input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
    y = tf.keras.layers.Conv2D(16,
                               8,
                               strides=2,
                               padding='same',
                               activation='relu').apply(input_layer)
    y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
    y = tf.keras.layers.Conv2D(32,
                               4,
                               strides=2,
                               padding='valid',
                               activation='relu').apply(y)
    y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
    y = tf.keras.layers.Flatten().apply(y)
    y = tf.keras.layers.Dense(32, activation='relu').apply(y)
    logits = tf.keras.layers.Dense(10).apply(y)

    # Calculate loss as a vector and as its average across minibatch.
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=logits)
    scalar_loss = tf.reduce_mean(vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Non-private baseline (kept for reference):
        # optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
        # train_op = optimizer.minimize(loss=scalar_loss, global_step=global_step)
        global_step = tf.train.get_global_step()

        # DP-SGD hyperparameters (hard-coded here; learning_rate, batch_size and
        # epochs are informational only -- the optimizer below uses
        # FLAGS.learning_rate, and batch_size/epochs must match the input_fn).
        learning_rate = 0.25
        noise_multiplier = 1.2
        l2_norm_clip = 1.5
        batch_size = 256
        epochs = 15
        num_microbatches = 16

        optimizer = optimizers.DPGradientDescentGaussianOptimizer(
            l2_norm_clip=l2_norm_clip,
            noise_multiplier=noise_multiplier,
            num_microbatches=num_microbatches,
            learning_rate=FLAGS.learning_rate)
        # Minimize the per-example (vector) loss and pass global_step so the
        # Estimator's step counter advances.
        train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)

        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.metrics.accuracy(labels=labels,
                                predictions=tf.argmax(input=logits, axis=1))
        }
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
Example #10
def cnn_model_fn(features, labels, mode):
    """Model function for a CNN."""

    # Define CNN architecture using tf.keras.layers.
    input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
    y = tf.keras.layers.Conv2D(16,
                               8,
                               strides=2,
                               padding='same',
                               activation='relu').apply(input_layer)
    y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
    y = tf.keras.layers.Conv2D(32,
                               4,
                               strides=2,
                               padding='valid',
                               activation='relu').apply(y)
    y = tf.keras.layers.MaxPool2D(2, 1).apply(y)
    y = tf.keras.layers.Flatten().apply(y)
    y = tf.keras.layers.Dense(32, activation='relu').apply(y)
    logits = tf.keras.layers.Dense(10).apply(y)

    # Calculate loss as a vector and as its average across minibatch.
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=logits)
    scalar_loss = tf.reduce_mean(vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf.estimator.ModeKeys.TRAIN:
        print("Train data mode")
        # optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
        # Track sampling events in a privacy ledger so epsilon can be reported
        # during training (DPGradientDescentGaussianOptimizer itself accepts no
        # population_size or training_hooks arguments).
        ledger = privacy_ledger.PrivacyLedger(
            population_size=60000,
            selection_probability=(FLAGS.batch_size / 60000))
        optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
            l2_norm_clip=FLAGS.l2_norm_clip,
            noise_multiplier=FLAGS.noise_multiplier,
            num_microbatches=FLAGS.num_microbatches,
            ledger=ledger,
            learning_rate=FLAGS.learning_rate)
        training_hooks = [EpsilonPrintingTrainingHook(ledger)]

        # DP-SGD needs the per-example (vector) loss to clip and noise
        # per-microbatch gradients; the scalar loss is only reported.
        opt_loss = vector_loss
        global_step = tf.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op,
                                          training_hooks=training_hooks)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf.estimator.ModeKeys.EVAL:
        print("Evaluate data mode")

        eval_metric_ops = {
            'accuracy':
            tf.metrics.accuracy(labels=labels,
                                predictions=tf.argmax(input=logits, axis=1))
        }

        return (tf.estimator.EstimatorSpec(mode=mode,
                                           loss=scalar_loss,
                                           eval_metric_ops=eval_metric_ops))

    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        print("predicting data mode")
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }

        return (tf.estimator.EstimatorSpec(mode=mode, predictions=predictions))
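
For completeness, a hedged usage sketch for the PREDICT branch above; the mnist_classifier Estimator and the test_data array are illustrative assumptions.

import tensorflow as tf

predict_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': test_data}, num_epochs=1, shuffle=False)
for pred in mnist_classifier.predict(input_fn=predict_input_fn):
    # Each prediction dict carries the keys defined in the model function.
    print(pred['class_ids'][0], pred['probabilities'].max())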