Example 1
import numpy as np
import tensorflow as tf
from absl import flags
from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized

FLAGS = flags.FLAGS  # flag definitions are assumed; see the sketch below


def train_model(model, train_x, train_y, save_weights=False):
    """Train the model on given data."""
    optimizer = dp_optimizer_vectorized.VectorizedDPSGD(
        l2_norm_clip=FLAGS.l2_norm_clip,
        noise_multiplier=FLAGS.noise_multiplier,
        num_microbatches=FLAGS.microbatches,
        learning_rate=FLAGS.learning_rate)

    # Per-example (vector) loss: DP-SGD needs an unreduced loss so it can
    # clip and noise gradients per microbatch.
    loss = tf.keras.losses.CategoricalCrossentropy(
        from_logits=True, reduction=tf.keras.losses.Reduction.NONE)

    # Compile model with Keras
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

    if save_weights:
        # Snapshot the freshly initialized weights to 'save_model.npy'
        # (np.save appends the extension) so later runs can reload them.
        wts = model.get_weights()
        np.save('save_model', wts)
        model.set_weights(wts)
        return model

    if FLAGS.load_weights:  # load preset weights
        wts = np.load('save_model.npy', allow_pickle=True).tolist()
        model.set_weights(wts)

    # Train model with Keras. Note: validation_data reuses the training set,
    # so the reported val metrics track training fit, not generalization.
    model.fit(train_x,
              train_y,
              epochs=FLAGS.epochs,
              validation_data=(train_x, train_y),
              batch_size=FLAGS.batch_size,
              verbose=0)
    return model
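
This function only runs once the absl flags it reads are defined and a compatible Keras model is supplied. A minimal, illustrative sketch of that surrounding setup (the flag names come from the snippet, but the default values and the model architecture are assumptions, not part of the original):

from absl import flags
import tensorflow as tf

flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm for per-example gradients')
flags.DEFINE_float('noise_multiplier', 1.1, 'Ratio of noise stddev to clip norm')
flags.DEFINE_integer('microbatches', 250, 'Number of microbatches (must divide batch_size)')
flags.DEFINE_float('learning_rate', 0.15, 'Learning rate')
flags.DEFINE_integer('epochs', 20, 'Number of training epochs')
flags.DEFINE_integer('batch_size', 250, 'Batch size')
flags.DEFINE_boolean('load_weights', False, 'Start from weights in save_model.npy')

FLAGS = flags.FLAGS  # parsed when app.run() invokes main in a real script

# Hypothetical model; CategoricalCrossentropy expects one-hot train_y.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(100,)),
    tf.keras.layers.Dense(10),
])

Note that the DP optimizers require batch_size to be an integer multiple of microbatches, since each microbatch's gradient is clipped separately.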
Example 2
import numpy as np
import tensorflow.compat.v1 as tfv1
from absl import flags

FLAGS = flags.FLAGS  # flag definitions are assumed to live elsewhere


def create_optimizer(learning_rate_var):
    if FLAGS.optimizer == 'adam':
        optimizer = tfv1.train.AdamOptimizer(learning_rate=learning_rate_var,
                                             beta1=FLAGS.beta1,
                                             beta2=FLAGS.beta2,
                                             epsilon=FLAGS.epsilon)
    elif FLAGS.optimizer == 'sgd':
        # The learning rate is pinned to 1 here rather than using
        # learning_rate_var; presumably the effective rate is applied
        # elsewhere in the original training loop.
        optimizer = tfv1.train.GradientDescentOptimizer(learning_rate=1)
    elif FLAGS.optimizer == 'dp-sgd':
        # compute_dp_sgd_privacy_lib is not used in this function; it is
        # imported for privacy accounting (see the sketch after this example).
        from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib
        from tensorflow_privacy.privacy.optimizers import dp_optimizer
        optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
            l2_norm_clip=FLAGS.gradient_clip_value,
            noise_multiplier=FLAGS.gradient_noise / FLAGS.gradient_clip_value *
            np.sqrt(FLAGS.train_batch_size),
            num_microbatches=FLAGS.train_batch_size // FLAGS.microbatch_size,
            learning_rate=learning_rate_var)
    elif FLAGS.optimizer == 'fast-dp-sgd':
        from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized
        optimizer = dp_optimizer_vectorized.VectorizedDPSGD(
            l2_norm_clip=FLAGS.gradient_clip_value,
            noise_multiplier=FLAGS.gradient_noise / FLAGS.gradient_clip_value,
            num_microbatches=FLAGS.train_batch_size // FLAGS.microbatch_size,
            learning_rate=learning_rate_var)

    else:
        # Fail loudly instead of hitting an UnboundLocalError below.
        raise ValueError('Unknown optimizer: %s' % FLAGS.optimizer)

    return optimizer
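
For the DP branches, compute_dp_sgd_privacy_lib is what turns the noise_multiplier into a privacy guarantee. A sketch of the usual accounting call, assuming the signature shipped in recent tensorflow_privacy releases (the dataset size, batch size, epoch count, and delta below are purely illustrative):

from tensorflow_privacy.privacy.analysis import compute_dp_sgd_privacy_lib

# Illustrative values: 60000 training examples, batch size 256, 15 epochs.
eps, opt_order = compute_dp_sgd_privacy_lib.compute_dp_sgd_privacy(
    n=60000,
    batch_size=256,
    noise_multiplier=1.1,
    epochs=15,
    delta=1e-5)
print('DP-SGD: epsilon = %.2f at delta = 1e-5 (order %s)' % (eps, opt_order))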
Example 3
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized


def nn_model_fn(model, loss_fn, args, features, labels, mode):
    """TF1-style Estimator model_fn; note that PREDICT mode is not handled."""
    logits = model()(features['x'])
    vector_loss = loss_fn(labels=labels, logits=logits)
    scalar_loss = tf.reduce_mean(vector_loss)

    if mode == tf.estimator.ModeKeys.TRAIN:
        if args.dpsgd:
            # Use DP version of GradientDescentOptimizer. Other optimizers are
            # available in dp_optimizer. Most optimizers inheriting from
            # tf.train.Optimizer should be wrappable in differentially private
            # counterparts by calling dp_optimizer.optimizer_from_args().
            optimizer = dp_optimizer_vectorized.VectorizedDPSGD(
                l2_norm_clip=args.l2_norm_clip,
                noise_multiplier=args.noise_multiplier,
                num_microbatches=args.microbatches,
                learning_rate=args.learning_rate)
            opt_loss = vector_loss
        else:
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=args.learning_rate)
            opt_loss = scalar_loss
        global_step = tf.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
        # In the following, we pass the mean of the loss (scalar_loss) rather than
        # the vector_loss because tf.estimator requires a scalar loss. This is only
        # used for evaluation and debugging by tf.estimator. The actual loss being
        # minimized is opt_loss defined above and passed to optimizer.minimize().
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    elif mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.metrics.accuracy(labels=labels,
                                predictions=tf.argmax(input=logits, axis=1))
        }

        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
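
tf.estimator expects a model_fn taking (features, labels, mode), so the extra model, loss_fn, and args parameters are typically bound ahead of time with functools.partial. A minimal, illustrative sketch of that wiring (make_model, the args values, and the model_dir are hypothetical stand-ins, not part of the original):

import argparse
import functools
import tensorflow.compat.v1 as tf

def make_model():
    # Hypothetical model factory: nn_model_fn calls model() to get a fresh
    # network, then applies it to features['x'].
    return tf.keras.Sequential([
        tf.keras.layers.Dense(32, activation='relu'),
        tf.keras.layers.Dense(10),
    ])

# Stand-in for parsed command-line arguments with illustrative values.
args = argparse.Namespace(dpsgd=True, l2_norm_clip=1.0, noise_multiplier=1.1,
                          microbatches=16, learning_rate=0.15)

model_fn = functools.partial(
    nn_model_fn,
    make_model,                                      # model
    tf.nn.sparse_softmax_cross_entropy_with_logits,  # loss_fn (vector output)
    args)

classifier = tf.estimator.Estimator(model_fn=model_fn, model_dir='/tmp/nn_model')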
Example 4
import tensorflow as tf
from absl import flags
from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized

FLAGS = flags.FLAGS  # flag definitions are assumed to live elsewhere


def train_model(model, train_x, train_y):
  """Train the model on given data."""
  optimizer = dp_optimizer_vectorized.VectorizedDPSGD(
      l2_norm_clip=FLAGS.l2_norm_clip,
      noise_multiplier=FLAGS.noise_multiplier,
      num_microbatches=FLAGS.microbatches,
      learning_rate=FLAGS.learning_rate)

  # gradient of (.5 - x.w)^2 with respect to w is -2(.5 - x.w)x
  loss = tf.keras.losses.MeanSquaredError(
      reduction=tf.keras.losses.Reduction.NONE)

  # Compile model with Keras
  model.compile(optimizer=optimizer, loss=loss, metrics=['mse'])

  # Train model with Keras
  model.fit(train_x, train_y,
            epochs=FLAGS.epochs,
            validation_data=(train_x, train_y),
            batch_size=FLAGS.batch_size,
            verbose=0)
  return model
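
The gradient comment above suggests this trainer is meant for a single linear unit fit to a constant 0.5 target. A minimal, illustrative sketch of such a setup (the shapes and data are assumptions for demonstration, not part of the original):

import numpy as np
import tensorflow as tf

# Hypothetical data: 1000 examples of dimension 10, every label 0.5.
train_x = np.random.normal(size=(1000, 10)).astype(np.float32)
train_y = np.full((1000, 1), 0.5, dtype=np.float32)

# A single linear unit, so predictions are exactly x.w as in the comment.
model = tf.keras.Sequential(
    [tf.keras.layers.Dense(1, use_bias=False, input_shape=(10,))])

model = train_model(model, train_x, train_y)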
Example 5
import tensorflow as tf
from absl import flags
from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized

FLAGS = flags.FLAGS  # flag definitions are assumed to live elsewhere


def cnn_model_fn(features, labels, mode):
    """Model function for a CNN."""

    # Define CNN architecture using tf.keras.layers.
    input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
    # Layers are called directly; the older layer.apply() is deprecated.
    y = tf.keras.layers.Conv2D(16,
                               8,
                               strides=2,
                               padding='same',
                               activation='relu')(input_layer)
    y = tf.keras.layers.MaxPool2D(2, 1)(y)
    y = tf.keras.layers.Conv2D(32,
                               4,
                               strides=2,
                               padding='valid',
                               activation='relu')(y)
    y = tf.keras.layers.MaxPool2D(2, 1)(y)
    y = tf.keras.layers.Flatten()(y)
    y = tf.keras.layers.Dense(32, activation='relu')(y)
    logits = tf.keras.layers.Dense(10)(y)

    # Calculate loss as a vector (to support microbatches in DP-SGD).
    vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                 logits=logits)
    # Define mean of loss across minibatch (for reporting through tf.Estimator).
    scalar_loss = tf.reduce_mean(input_tensor=vector_loss)

    # Configure the training op (for TRAIN mode).
    if mode == tf.estimator.ModeKeys.TRAIN:

        if FLAGS.dpsgd:
            # Use DP version of GradientDescentOptimizer. Other optimizers are
            # available in dp_optimizer. Most optimizers inheriting from
            # tf.train.Optimizer should be wrappable in differentially private
            # counterparts by calling dp_optimizer.optimizer_from_args().
            optimizer = dp_optimizer_vectorized.VectorizedDPSGD(
                l2_norm_clip=FLAGS.l2_norm_clip,
                noise_multiplier=FLAGS.noise_multiplier,
                num_microbatches=FLAGS.microbatches,
                learning_rate=FLAGS.learning_rate)
            opt_loss = vector_loss
        else:
            optimizer = tf.compat.v1.train.GradientDescentOptimizer(
                learning_rate=FLAGS.learning_rate)
            opt_loss = scalar_loss
        global_step = tf.compat.v1.train.get_global_step()
        train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
        # In the following, we pass the mean of the loss (scalar_loss) rather than
        # the vector_loss because tf.estimator requires a scalar loss. This is only
        # used for evaluation and debugging by tf.estimator. The actual loss being
        # minimized is opt_loss defined above and passed to optimizer.minimize().
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          train_op=train_op)

    # Add evaluation metrics (for EVAL mode).
    elif mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            'accuracy':
            tf.compat.v1.metrics.accuracy(labels=labels,
                                          predictions=tf.argmax(input=logits,
                                                                axis=1))
        }

        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=scalar_loss,
                                          eval_metric_ops=eval_metric_ops)
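
A sketch of how this model_fn is typically wired into an Estimator training loop on MNIST; the input pipeline, batch size, step count, and model_dir below are illustrative, and the flags read inside cnn_model_fn must already be defined and parsed:

import numpy as np
import tensorflow as tf

# Load MNIST and normalize to [0, 1]; labels must be int for the sparse loss.
(train_x, train_labels), _ = tf.keras.datasets.mnist.load_data()
train_x = (train_x / 255.0).astype(np.float32)
train_labels = train_labels.astype(np.int32)

train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={'x': train_x},
    y=train_labels,
    batch_size=256,
    num_epochs=None,
    shuffle=True)

mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
                                          model_dir='/tmp/mnist_model')
mnist_classifier.train(input_fn=train_input_fn, steps=1000)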