# Assumes TensorFlow 1.x-style APIs and the tensorflow_privacy package
# (module paths may differ slightly across tensorflow_privacy versions).
import tensorflow as tf

from tensorflow_privacy.privacy.dp_query import gaussian_query
from tensorflow_privacy.privacy.optimizers import dp_optimizer


def linear_model_fn(features, labels, mode):
    # Single linear unit; .apply() builds the layer on the input tensor (TF 1.x style).
    preds = tf.keras.layers.Dense(1, activation='linear',
                                  name='dense').apply(features['x'])

    # The DP optimizer needs the per-example (vector) loss; the scalar mean is
    # only reported to the Estimator.
    vector_loss = tf.squared_difference(labels, preds)
    scalar_loss = tf.reduce_mean(vector_loss)

    # Gaussian average query: clipping norm 1.0, noise stddev 0.0, denominator 1.
    dp_average_query = gaussian_query.GaussianAverageQuery(1.0, 0.0, 1)
    optimizer = dp_optimizer.DPGradientDescentOptimizer(
        dp_average_query, num_microbatches=1, learning_rate=1.0)

    global_step = tf.train.get_global_step()
    train_op = optimizer.minimize(loss=vector_loss, global_step=global_step)
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=scalar_loss,
                                      train_op=train_op)
Example #2
    # Requires TF 1.x and:
    #   from tensorflow_privacy.privacy.optimizers import dp_optimizer
    def define_training_procedure(self, parameters):
        # Define the training procedure: choose the optimizer and build the
        # gradient-update op.
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        if parameters['optimizer'] == 'adam':
            self.optimizer = tf.train.AdamOptimizer(parameters['learning_rate'])
        elif parameters['optimizer'] == 'sgd':
            # Alteration to make training differentially private: replace the
            # plain tf.train.GradientDescentOptimizer with the DP-SGD optimizer
            # from tensorflow_privacy. The Gaussian variant takes the clipping
            # norm and noise multiplier directly (no separate DP query object).
            self.optimizer = dp_optimizer.DPGradientDescentGaussianOptimizer(
                l2_norm_clip=1.0,        # per-microbatch gradient clipping norm
                noise_multiplier=1.1,    # noise stddev = noise_multiplier * l2_norm_clip
                num_microbatches=256,
                learning_rate=parameters['learning_rate'])
            # Optionally, a privacy ledger and an epsilon-reporting training hook
            # can be attached here to track the privacy budget during training.
        elif parameters['optimizer'] == 'adadelta':
            self.optimizer = tf.train.AdadeltaOptimizer(parameters['learning_rate'])
        else:
            raise ValueError("The 'optimizer' parameter must be either adadelta, adam or sgd.")

        # DP alteration: gradients are computed from the per-example (vector) loss
        # rather than the averaged scalar loss, and taken with respect to the
        # trainable variables (the original, non-private call was
        # self.optimizer.compute_gradients(self.loss)).
        grads_and_vars = self.optimizer.compute_gradients(self.vector_loss,
                                                          var_list=tf.trainable_variables())
        if parameters['gradient_clipping_value']:
            clip_value = parameters['gradient_clipping_value']
            grads_and_vars = [(tf.clip_by_value(grad, -clip_value, clip_value), var)
                              for grad, var in grads_and_vars if grad is not None]
        # By defining a global_step variable and passing it to the optimizer, we let
        # TensorFlow handle the counting of training steps: global_step is incremented
        # by one every time train_op is executed.
        self.train_op = self.optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)
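A hedged sketch of how the privacy budget implied by the DP-SGD settings above (noise_multiplier=1.1) could be estimated with tensorflow_privacy's analysis helper, standing in for the commented-out ledger/epsilon hook; the training-set size, batch size, epoch count, and delta are illustrative assumptions, and the module path may differ across tensorflow_privacy versions:

from tensorflow_privacy.privacy.analysis.compute_dp_sgd_privacy import compute_dp_sgd_privacy

# Illustrative values only: 60000 training examples, batch size 256 (one example
# per microbatch, matching num_microbatches=256), 15 epochs, delta = 1e-5.
eps, opt_order = compute_dp_sgd_privacy(n=60000,
                                        batch_size=256,
                                        noise_multiplier=1.1,
                                        epochs=15,
                                        delta=1e-5)
print('Estimated DP-SGD guarantee: eps = %.2f at delta = 1e-5' % eps)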