def test_depthwise_conv2d_set_weights(self):
    """A TFE DepthwiseConv2D rebuilt from a Keras config and fed the same
    weights should reproduce the plaintext Keras prediction."""
    shape = (1, 10, 10, 3)
    data = np.random.normal(size=shape)

    # Plaintext reference run with stock Keras.
    with tf.Session():
        ref_model = tf.keras.models.Sequential()
        ref_model.add(
            tf.keras.layers.DepthwiseConv2D(
                kernel_size=(2, 2), batch_input_shape=shape))
        expected = ref_model.predict(data)
        keras_weights = ref_model.get_weights()
        keras_config = ref_model.get_config()

    # Encrypted run: rebuild from config, load identical weights.
    with tfe.protocol.SecureNN():
        private_x = tfe.define_private_input(
            "inputter", lambda: tf.convert_to_tensor(data))
        secure_model = tfe.keras.models.model_from_config(keras_config)
        secure_model.set_weights(keras_weights)
        prediction = secure_model(private_x)

        with KE.get_session() as sess:
            actual = sess.run(prediction.reveal())
            # Loose tolerances: fixed-point MPC arithmetic is approximate.
            np.testing.assert_allclose(actual, expected, rtol=1e-2, atol=1e-2)

    KE.clear_session()
def fit(self, x, y, epochs=1, steps_per_epoch=1):
    """Trains the model for a given number of epochs (iterations on a dataset).

    Arguments:
      x: Private tensor of training data
      y: Private tensor of target (label) data
      epochs: Integer. Number of epochs to train the model.
      steps_per_epoch: Integer. Total number of steps (batches of samples)
          before declaring one epoch finished and starting the next epoch.
    """
    assert isinstance(x, PondPrivateTensor), type(x)
    assert isinstance(y, PondPrivateTensor), type(y)

    # All TFE variables must be initialized before the first batch runs.
    KE.get_session().run(tf.global_variables_initializer())

    # Leading dimension of x is the (fixed) batch size; it does not
    # change across epochs, so compute it once.
    batch_size = x.shape.as_list()[0]

    for epoch in range(epochs):
        print("Epoch {}/{}".format(epoch + 1, epochs))
        progbar = utils.Progbar(batch_size * steps_per_epoch)
        for _ in range(steps_per_epoch):
            self.fit_batch(x, y)
            # fit_batch stores the revealed loss on self._current_loss.
            progbar.add(batch_size, values=[("loss", self._current_loss)])
def set_weights(self, weights, sess=None):
    """Sets the weights of the layer.

    Arguments:
      weights: A list of Numpy arrays with shapes and types
          matching the output of layer.get_weights() or a list
          of private variables
      sess: tfe session

    Raises:
      TypeError: if the weight type is accepted by the entry assert but
          has no assignment path (e.g. PondMaskedTensor).
    """
    weights_types = (np.ndarray, PondPrivateTensor, PondMaskedTensor)
    assert isinstance(weights[0], weights_types), type(weights[0])

    if not sess:
        sess = KE.get_session()

    # Assign new keras weights to existing weights defined by
    # default when tfe layer was instantiated
    if isinstance(weights[0], np.ndarray):
        # Plaintext arrays are fed through private placeholders so the
        # values get secret-shared on assignment.
        for i, w in enumerate(self.weights):
            shape = w.shape.as_list()
            tfe_weights_pl = tfe.define_private_placeholder(shape)
            fd = tfe_weights_pl.feed(weights[i].reshape(shape))
            sess.run(tfe.assign(w, tfe_weights_pl), feed_dict=fd)
    elif isinstance(weights[0], PondPrivateTensor):
        # Already-private tensors can be assigned directly.
        for i, w in enumerate(self.weights):
            shape = w.shape.as_list()
            sess.run(tfe.assign(w, weights[i].reshape(shape)))
    else:
        # BUG FIX: previously this case (e.g. PondMaskedTensor, which the
        # assert above admits) fell through silently, leaving the layer
        # weights untouched. Fail loudly instead so callers notice.
        raise TypeError(
            "Don't know how to handle weights of type {}".format(
                type(weights[0])))
def fit_batch(self, x, y):
    """Trains the model on a single batch.

    Arguments:
      x: Private tensor of training data
      y: Private tensor of target (label) data
    """
    # Forward pass, then push the loss gradient back through the model.
    predictions = self.call(x)
    loss_grad = self._loss.grad(y, predictions)
    self.backward(loss_grad)

    # Reveal the batch loss so it can be reported as a plain value.
    batch_loss = self._loss(y, predictions)
    self._current_loss = KE.get_session().run(batch_loss.reveal())
def test_from_config(self):
    """Sequential.from_config plus set_weights should reproduce the
    plaintext Keras prediction for the same input."""
    shape = (1, 3)
    data = np.random.normal(size=shape)

    # Plaintext reference prediction plus the weights/config to clone.
    expected, keras_weights, keras_config = _model_predict_keras(
        data, shape)

    with tfe.protocol.SecureNN():
        private_x = tfe.define_private_input(
            "inputter", lambda: tf.convert_to_tensor(data))
        secure_model = Sequential.from_config(keras_config)
        secure_model.set_weights(keras_weights)
        prediction = secure_model(private_x)

        with KE.get_session() as sess:
            actual = sess.run(prediction.reveal())
            # Loose tolerances: fixed-point MPC arithmetic is approximate.
            np.testing.assert_allclose(actual, expected, rtol=1e-2, atol=1e-3)

    KE.clear_session()
def set_weights(self, weights, sess=None):
    """Update layer weights from numpy array or Public Tensors
    including denom.

    Arguments:
      weights: A list of Numpy arrays with shapes and types matching
          the output of layer.get_weights() or a list of private
          variables
      sess: tfe session
    """
    if not sess:
        sess = KE.get_session()

    if isinstance(weights[0], np.ndarray):
        # Plaintext arrays: feed each one through a *public* placeholder
        # and assign it to the matching TFE variable. Batchnorm statistics
        # are kept public, so anything else is rejected below.
        for i, w in enumerate(self.weights):
            if isinstance(w, PondPublicTensor):
                shape = w.shape.as_list()
                tfe_weights_pl = tfe.define_public_placeholder(shape)
                fd = tfe_weights_pl.feed(weights[i].reshape(shape))
                sess.run(tfe.assign(w, tfe_weights_pl), feed_dict=fd)
            else:
                raise TypeError(
                    (
                        "Don't know how to handle weights "
                        "of type {}. Batchnorm expects public tensors"
                        "as weights"
                    ).format(type(w))
                )
    elif isinstance(weights[0], PondPublicTensor):
        # Already-public tensors can be assigned directly.
        for i, w in enumerate(self.weights):
            shape = w.shape.as_list()
            sess.run(tfe.assign(w, weights[i].reshape(shape)))

    # denom = 1 / sqrt(moving_variance + epsilon), recomputed here so it
    # stays in sync with the freshly assigned moving variance.
    # Compute denom on public tensors before being lifted to private tensor
    denomtemp = tfe.reciprocal(
        tfe.sqrt(tfe.add(self.moving_variance, self.epsilon))
    )

    # Update denom as well when moving variance gets updated
    # NOTE(review): placed at function level so it runs for both weight
    # types above — confirm this matches the intended original indentation.
    sess.run(tfe.assign(self.denom, denomtemp))
def test_conv_model(self):
    """End-to-end check of a small conv net: the TFE prediction should
    match the plaintext Keras prediction within fixed-point tolerance."""
    num_classes = 10
    shape = (1, 28, 28, 1)
    data = np.random.normal(size=shape)

    # Plaintext reference model (first layer carries the input shape).
    with tf.Session():
        ref_model = tf.keras.models.Sequential()
        ref_model.add(
            tf.keras.layers.Conv2D(2, (3, 3), batch_input_shape=shape))
        for layer in (
            tf.keras.layers.ReLU(),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.AveragePooling2D((2, 2)),
            tf.keras.layers.Conv2D(2, (3, 3)),
            tf.keras.layers.ReLU(),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.AveragePooling2D((2, 2)),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(num_classes, name="logit"),
        ):
            ref_model.add(layer)
        expected = ref_model.predict(data)
        keras_weights = ref_model.get_weights()
        keras_config = ref_model.get_config()

    # Encrypted run: rebuild from config, load identical weights.
    with tfe.protocol.SecureNN():
        private_x = tfe.define_private_input(
            "inputter", lambda: tf.convert_to_tensor(data))
        secure_model = tfe.keras.models.model_from_config(keras_config)
        secure_model.set_weights(keras_weights)
        logits = secure_model(private_x)

        with KE.get_session() as sess:
            actual = sess.run(logits.reveal())
            np.testing.assert_allclose(actual, expected, rtol=1e-2, atol=1e-2)

    KE.clear_session()
def test_clone_model(self):
    """tfe.keras.models.clone_model should copy both architecture and
    weights, so predictions match plaintext Keras."""
    shape = (1, 3)
    data = np.random.normal(size=shape)

    # Small plaintext reference network.
    ref_model = tf.keras.models.Sequential()
    ref_model.add(tf.keras.layers.Dense(2, batch_input_shape=shape))
    ref_model.add(tf.keras.layers.Dense(3))
    expected = ref_model.predict(data)

    with tfe.protocol.SecureNN():
        private_x = tfe.define_private_input(
            "inputter", lambda: tf.convert_to_tensor(data))
        # clone_model transfers weights too — no set_weights call needed.
        secure_model = tfe.keras.models.clone_model(ref_model)

        with KE.get_session() as sess:
            prediction = secure_model(private_x)
            actual = sess.run(prediction.reveal())
            np.testing.assert_allclose(actual, expected, rtol=1e-2, atol=1e-3)

    KE.clear_session()
def set_weights(self, weights, sess=None):
    """Sets the weights of the model.

    Arguments:
      weights: A list of Numpy arrays with shapes and types matching
          the output of model.get_weights()
      sess: tfe.Session instance.
    """
    if not sess:
        sess = KE.get_session()

    # Walk the flat weights list with a running offset, handing each
    # layer exactly as many entries as it owns.
    offset = 0
    for layer in self.layers:
        num_param = len(layer.weights)
        if num_param == 0:
            continue  # parameterless layer (e.g. activation/pooling)
        layer.set_weights(weights[offset:offset + num_param], sess)
        offset += num_param
def apply_gradients(self, var, grad):
    """Applies one SGD step, w <- w - lr * g, to every weight.

    Arguments:
      var: list of TFE weight variables to update.
      grad: list of gradients, aligned element-wise with `var`.
    """
    sess = KE.get_session()
    # Build all assign ops first, then execute them in a single
    # session call — one run per variable (the previous behavior)
    # costs an unnecessary session round-trip for each weight.
    update_ops = [
        tfe.assign(w, w - g * self.lr) for w, g in zip(var, grad)
    ]
    sess.run(update_ops)