Example #1
    def set_weights(self, weights, sess=None):
        """Sets the weights of the layer.

        Arguments:
          weights: A list of Numpy arrays with shapes and types
              matching the output of layer.get_weights(), or a list
              of private tensors.
          sess: tfe session.
        """

        weights_types = (np.ndarray, PondPrivateTensor, PondMaskedTensor)
        assert isinstance(weights[0], weights_types), type(weights[0])

        # Assign the new Keras weights to the existing weights defined
        # when the tfe layer was instantiated.
        if not sess:
            sess = KE.get_session()

        if isinstance(weights[0], np.ndarray):
            # Plaintext weights: feed them through a private placeholder
            # so they are secret-shared before being assigned.
            for i, w in enumerate(self.weights):
                shape = w.shape.as_list()
                tfe_weights_pl = tfe.define_private_placeholder(shape)
                fd = tfe_weights_pl.feed(weights[i].reshape(shape))
                sess.run(tfe.assign(w, tfe_weights_pl), feed_dict=fd)
        elif isinstance(weights[0], PondPrivateTensor):
            # Already-private weights can be assigned directly.
            for i, w in enumerate(self.weights):
                shape = w.shape.as_list()
                sess.run(tfe.assign(w, weights[i].reshape(shape)))
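
A minimal usage sketch for the method above. The names `keras_layer` and `tfe_layer` are hypothetical stand-ins for a plain Keras layer and its tf-encrypted counterpart:

# Hypothetical objects: `keras_layer` is a plain Keras layer and
# `tfe_layer` is the matching tf-encrypted layer built elsewhere.
plain_weights = keras_layer.get_weights()  # list of Numpy arrays
tfe_layer.set_weights(plain_weights)       # secret-share and assign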
Example #2
    def backward(self, x, dy, learning_rate=0.01):
        batch_size = x.shape.as_list()[0]
        with tf.name_scope("backward"):
            # Average the gradients over the batch.
            dw = tfe.matmul(tfe.transpose(x), dy) / batch_size
            db = tfe.reduce_sum(dy, axis=0) / batch_size
            # Return the update ops; the caller runs them in a session.
            assign_ops = [
                tfe.assign(self.w, self.w - dw * learning_rate),
                tfe.assign(self.b, self.b - db * learning_rate),
            ]
            return assign_ops
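
A hedged driver sketch for the ops returned above; `layer`, `x` and `dy` are assumed to be defined elsewhere, with `x` and `dy` being private tensors:

# Assumption: `layer` owns the backward method above; `x` and `dy`
# are private tensors built elsewhere.
assign_ops = layer.backward(x, dy, learning_rate=0.01)
with tfe.Session() as sess:
    sess.run(tfe.global_variables_initializer())
    for _ in range(10):  # a few gradient steps
        sess.run(assign_ops)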
Example #3
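To run this test standalone it presumably needs imports along these lines; the ABY3 module path is an assumption based on tf-encrypted's layout:

import tensorflow as tf
import tf_encrypted as tfe
from tf_encrypted.protocol.aby3 import ABY3  # assumed module path
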
def test_simple_lr_model():
    tf.reset_default_graph()

    import time
    start = time.time()
    prot = ABY3()
    tfe.set_protocol(prot)

    # define inputs
    x_raw = tf.random.uniform(minval=-0.5,
                              maxval=0.5,
                              shape=[99, 10],
                              seed=1000)
    x = tfe.define_private_variable(x_raw, name="x")
    y_raw = tf.cast(tf.reduce_mean(x_raw, axis=1, keepdims=True) > 0,
                    dtype=tf.float32)
    y = tfe.define_private_variable(y_raw, name="y")
    w = tfe.define_private_variable(tf.random_uniform([10, 1],
                                                      -0.01,
                                                      0.01,
                                                      seed=100),
                                    name="w")
    b = tfe.define_private_variable(tf.zeros([1]), name="b")
    learning_rate = 0.01

    with tf.name_scope("forward"):
        out = tfe.matmul(x, w) + b
        y_hat = tfe.sigmoid(out)

    with tf.name_scope("loss-grad"):
        dy = y_hat - y
    batch_size = x.shape.as_list()[0]
    with tf.name_scope("backward"):
        dw = tfe.matmul(tfe.transpose(x), dy) / batch_size
        db = tfe.reduce_sum(dy, axis=0) / batch_size
        upd1 = dw * learning_rate
        upd2 = db * learning_rate
        assign_ops = [tfe.assign(w, w - upd1), tfe.assign(b, b - upd2)]

    with tfe.Session() as sess:
        # initialize variables
        sess.run(tfe.global_variables_initializer())
        for _ in range(1):  # single training step
            sess.run(assign_ops)

        print(sess.run(w.reveal()))
    end = time.time()
    print("Elapsed time: {} seconds".format(end - start))
Example #4
    def set_weights(self, weights, sess=None):
        """Updates layer weights from Numpy arrays or public tensors,
        including the cached denominator.

        Arguments:
          weights: A list of Numpy arrays with shapes and types
              matching the output of layer.get_weights(), or a list
              of public tensors.
          sess: tfe session.
        """

        if not sess:
            sess = KE.get_session()

        if isinstance(weights[0], np.ndarray):
            # Plaintext weights: feed them through a public placeholder.
            for i, w in enumerate(self.weights):
                if isinstance(w, PondPublicTensor):
                    shape = w.shape.as_list()
                    tfe_weights_pl = tfe.define_public_placeholder(shape)
                    fd = tfe_weights_pl.feed(weights[i].reshape(shape))
                    sess.run(tfe.assign(w, tfe_weights_pl), feed_dict=fd)
                else:
                    raise TypeError(
                        (
                            "Don't know how to handle weights "
                            "of type {}. Batchnorm expects public "
                            "tensors as weights."
                        ).format(type(w))
                    )

        elif isinstance(weights[0], PondPublicTensor):
            for i, w in enumerate(self.weights):
                shape = w.shape.as_list()
                sess.run(tfe.assign(w, weights[i].reshape(shape)))

        # Compute denom on public tensors before it is lifted to a
        # private tensor: denom = 1 / sqrt(moving_variance + epsilon).
        denomtemp = tfe.reciprocal(
            tfe.sqrt(tfe.add(self.moving_variance, self.epsilon))
        )

        # Keep denom in sync whenever the moving variance is updated.
        sess.run(tfe.assign(self.denom, denomtemp))
Example #5
    def test_public_assign(self):

        with tfe.protocol.Pond() as prot:
            # A public variable initialized to zeros, plus a public
            # placeholder through which new plaintext values are fed.
            x_var = prot.define_public_variable(np.zeros(shape=(2, 2)))
            data = np.ones((2, 2))
            x_pl = tfe.define_public_placeholder(shape=(2, 2))
            fd = x_pl.feed(data.reshape((2, 2)))

        with tfe.Session() as sess:
            # Assign the fed values and check they round-trip intact.
            sess.run(tfe.assign(x_var, x_pl), feed_dict=fd)
            result = sess.run(x_var)
            np.testing.assert_array_equal(result, np.ones([2, 2]))
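
For contrast, a hedged sketch of the private-tensor analogue of the test above, using `tfe.define_private_placeholder` as in Example #1; the exact argument signatures are assumptions:

    # Sketch (assumption): the same round-trip with private values.
    with tfe.protocol.Pond() as prot:
        y_var = prot.define_private_variable(np.zeros((2, 2)))
        y_pl = tfe.define_private_placeholder(shape=(2, 2))
        fd = y_pl.feed(np.ones((2, 2)))

    with tfe.Session() as sess:
        sess.run(tfe.assign(y_var, y_pl), feed_dict=fd)
        np.testing.assert_array_equal(
            sess.run(y_var.reveal()), np.ones((2, 2)))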
Example #6
    def apply_gradients(self, var, grad):
        # One SGD step per variable: w <- w - lr * grad.
        sess = KE.get_session()
        for i, w in enumerate(var):
            sess.run(tfe.assign(w, w - grad[i] * self.lr))
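
A hypothetical call site for the optimizer method above; `opt`, `weights` and `grads` are assumed names, with the two lists lining up element-wise:

# Assumption: `opt` is an instance of the optimizer class above;
# `weights` and `grads` are equal-length lists of private tensors.
opt.apply_gradients(weights, grads)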
Example #7
# Model parameters, secret-shared among the parties
W = tfe.define_private_variable(tf.random_uniform([nb_feats, 1], -0.01, 0.01))
b = tfe.define_private_variable(tf.zeros([1]))

# Training model
out = tfe.matmul(xp, W) + b
pred = tfe.sigmoid(out)
# Due to the missing log-function approximation, the cost would have to be
# computed in NumPy:
# cost = -tfe.sum(y * tfe.log(pred) + (1 - y) * tfe.log(1 - pred)) * (1/train_batch_size)

# Backprop
dc_dout = pred - yp
dW = tfe.matmul(tfe.transpose(xp), dc_dout) * (1 / batch_size)
db = tfe.reduce_sum(1. * dc_dout, axis=0) * (1 / batch_size)
ops = [
    tfe.assign(W, W - dW * learning_rate),
    tfe.assign(b, b - db * learning_rate)
]

# Testing model
pred_test = tfe.sigmoid(tfe.matmul(xp_test, W) + b)


def print_accuracy(pred_test_tf, y_test_tf: tf.Tensor) -> tf.Operation:
    correct_prediction = tf.equal(tf.round(pred_test_tf), y_test_tf)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return tf.print("Accuracy", accuracy)


print_acc_op = tfe.define_output('input-provider', [pred_test, yp_test], print_accuracy)
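
A hedged driver sketch for the graph above. It assumes `ops` and `print_acc_op` as defined in this snippet, and that the omitted part of the script defines the inputs (`xp`, `yp`, `xp_test`, `yp_test`) and hyperparameters:

# Assumption: inputs and hyperparameters come from the omitted part
# of the script; the step count below is arbitrary.
with tfe.Session() as sess:
    sess.run(tfe.global_variables_initializer())
    for _ in range(100):
        sess.run(ops)          # one SGD update of W and b per step
    sess.run(print_acc_op)     # print test accuracy at the input provider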