Example #1
def test_polynomial_piecewise():
    tf.reset_default_graph()

    import time
    start = time.time()
    prot = ABY3()
    tfe.set_protocol(prot)

    x = tfe.define_private_variable(
        tf.constant([[-1, -0.5, -0.25], [0, 0.25, 2]]))

    # This approximates the sigmoid function with a piecewise function:
    # f(x) = (0 if x<-0.5), (x+0.5 if -0.5<=x<0.5), (1 if x>=0.5)
    z1 = tfe.polynomial_piecewise(
        x,
        (-0.5, 0.5),
        (
            (0, ), (0.5, 1), (1, )
        )  # use a tuple: a list is not hashable for the memoization cache key
    )
    # Or, simply use the pre-defined sigmoid API which includes a different approximation
    z2 = tfe.sigmoid(x)

    with tfe.Session() as sess:
        # initialize variables
        sess.run(tfe.global_variables_initializer())
        # reveal result
        result = sess.run(z1.reveal())
        # close() is a test helper asserting approximate equality
        close(result, np.array([[0, 0, 0.25], [0.5, 0.75, 1]]))
        result = sess.run(z2.reveal())
        close(result, np.array([[0.33, 0.415, 0.4575], [0.5, 0.5425, 0.84]]))
        print("test_polynomial_piecewise succeeds")

    end = time.time()
    print("Elapsed time: {} seconds".format(end - start))
Example #2
    def test_polynomial_piecewise(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        x = tfe.define_private_variable(tf.constant([[-1, -0.5, -0.25], [0, 0.25, 2]]))

        # This approximates the sigmoid function with a piecewise function:
        # f(x) = (0 if x<-0.5), (x+0.5 if -0.5<=x<0.5), (1 if x>=0.5)
        z1 = tfe.polynomial_piecewise(
            x,
            (-0.5, 0.5),
            ((0,), (0.5, 1), (1,)),  # use tuple because list is not hashable
        )
        # Or, simply use the pre-defined sigmoid API which includes a different approximation
        z2 = tfe.sigmoid(x)

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z1.reveal())
            np.testing.assert_allclose(
                result, np.array([[0, 0, 0.25], [0.5, 0.75, 1]]), rtol=0.0, atol=0.01
            )
            result = sess.run(z2.reveal())
            np.testing.assert_allclose(
                result,
                np.array([[0.33, 0.415, 0.4575], [0.5, 0.5425, 0.84]]),
                rtol=0.0,
                atol=0.01,
            )
Example #3
def test_simple_lr_model():
    tf.reset_default_graph()

    import time
    start = time.time()
    prot = ABY3()
    tfe.set_protocol(prot)

    # define inputs
    x_raw = tf.random.uniform(minval=-0.5,
                              maxval=0.5,
                              shape=[99, 10],
                              seed=1000)
    x = tfe.define_private_variable(x_raw, name="x")
    y_raw = tf.cast(tf.reduce_mean(x_raw, axis=1, keepdims=True) > 0,
                    dtype=tf.float32)
    y = tfe.define_private_variable(y_raw, name="y")
    w = tfe.define_private_variable(tf.random_uniform([10, 1],
                                                      -0.01,
                                                      0.01,
                                                      seed=100),
                                    name="w")
    b = tfe.define_private_variable(tf.zeros([1]), name="b")
    learning_rate = 0.01

    with tf.name_scope("forward"):
        out = tfe.matmul(x, w) + b
        y_hat = tfe.sigmoid(out)

    with tf.name_scope("loss-grad"):
        dy = y_hat - y
    batch_size = x.shape.as_list()[0]
    with tf.name_scope("backward"):
        dw = tfe.matmul(tfe.transpose(x), dy) / batch_size
        db = tfe.reduce_sum(dy, axis=0) / batch_size
        upd1 = dw * learning_rate
        upd2 = db * learning_rate
        assign_ops = [tfe.assign(w, w - upd1), tfe.assign(b, b - upd2)]

    with tfe.Session() as sess:
        # initialize variables
        sess.run(tfe.global_variables_initializer())
        for i in range(1):
            sess.run(assign_ops)

        print(sess.run(w.reveal()))
    end = time.time()
    print("Elapsed time: {} seconds".format(end - start))
Example #4
    # get model parameters as private tensors from the model owner
    params = tfe.define_private_input(model_owner.player_name,
                                      model_owner.provide_input,
                                      masked=True)  # pylint: disable=E0632

    # we'll use the same parameters for each prediction so we cache them to
    # avoid re-training each time
    cache_updater, params = tfe.cache(params)

    # get prediction input from client
    x = tfe.define_private_input(prediction_client.player_name,
                                 prediction_client.provide_input,
                                 masked=True)  # pylint: disable=E0632

    # compute prediction
    w0, b0, w1, b1 = params
    layer0 = tfe.matmul(x, w0) + b0
    layer1 = tfe.sigmoid(layer0 * 0.1)  # inputs scaled down to avoid large values
    logits = tfe.matmul(layer1, w1) + b1

    # send prediction output back to client
    prediction_op = tfe.define_output(prediction_client.player_name, logits,
                                      prediction_client.receive_output)

    with tfe.Session(target=session_target) as sess:

        sess.run(tf.global_variables_initializer(), tag='init')

        print("Training")
        sess.run(cache_updater, tag='training')

        for _ in range(10):
            print("Predicting")
Example #5
def binary_crossentropy_from_logits(y_true, y_pred):
    y_pred = tfe.sigmoid(y_pred)
    return binary_crossentropy(y_true, y_pred)
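
In plaintext terms the wrapper just maps logits through a sigmoid before the usual cross-entropy. A NumPy sketch (np_binary_crossentropy is an illustrative stand-in for the library's loss):

import numpy as np

def np_binary_crossentropy(y_true, y_pred, eps=1e-7):
    y_pred = np.clip(y_pred, eps, 1 - eps)  # avoid log(0)
    return -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))

logits = np.array([-2.0, 0.0, 3.0])
y_true = np.array([0.0, 1.0, 1.0])
probs = 1.0 / (1.0 + np.exp(-logits))  # sigmoid turns logits into probabilities
print(np_binary_crossentropy(y_true, probs))  # ~0.29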
Example #6
    def grad(self, y_true, y_pred):
        if self.from_logits:
            grad = tfe.sigmoid(y_pred) - y_true
        else:
            grad = y_pred - y_true
        return grad
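
The from_logits shortcut works because d/dz BCE(sigmoid(z), y) = sigmoid(z) - y. A quick finite-difference check in plain NumPy:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def bce_from_logits(z, y):
    p = sigmoid(z)
    return -(y * np.log(p) + (1 - y) * np.log(1 - p))

z, y, h = 0.7, 1.0, 1e-6
numeric = (bce_from_logits(z + h, y) - bce_from_logits(z - h, y)) / (2 * h)
analytic = sigmoid(z) - y
print(numeric, analytic)  # both ~ -0.3318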
Example #7
    def forward(self, x):
        with tf.name_scope("forward"):
            out = tfe.matmul(x, self.w_masked) + self.b_masked
            y = tfe.sigmoid(out)
            return y
Example #8
learning_rate = 0.01
training_set_size = 2000
test_set_size = 100
training_epochs = 10
batch_size = 100
nb_feats = 10

xp, yp = tfe.define_private_input(
    'input-provider', lambda: gen_training_input(training_set_size, nb_feats, batch_size))
xp_test, yp_test = tfe.define_private_input(
    'input-provider', lambda: gen_test_input(test_set_size, nb_feats, batch_size))

W = tfe.define_private_variable(tf.random_uniform([nb_feats, 1], -0.01, 0.01))
b = tfe.define_private_variable(tf.zeros([1]))

# Training model
out = tfe.matmul(xp, W) + b
pred = tfe.sigmoid(out)
# tfe has no log approximation here, so the cost has to be computed in NumPy:
# cost = -tfe.reduce_sum(yp * tfe.log(pred) + (1 - yp) * tfe.log(1 - pred)) / batch_size

# Backprop
dc_dout = pred - yp
dW = tfe.matmul(tfe.transpose(xp), dc_dout) * (1 / batch_size)
db = tfe.reduce_sum(1. * dc_dout, axis=0) * (1 / batch_size)
ops = [
    tfe.assign(W, W - dW * learning_rate),
    tfe.assign(b, b - db * learning_rate)
]

# Testing model
pred_test = tfe.sigmoid(tfe.matmul(xp_test, W) + b)
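
As noted above, the cost can be evaluated in NumPy on revealed values (a sketch; the reveal() calls assume the protocol permits opening these tensors):

import numpy as np

def np_cost(y_true, y_pred, eps=1e-7):
    y_pred = np.clip(y_pred, eps, 1 - eps)  # avoid log(0)
    return -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))

# e.g. inside a session:
#   np_cost(sess.run(yp.reveal()), sess.run(pred.reveal()))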
Example #9
def sigmoid(x):
    """Computes sigmoid of x element-wise"""
    return tfe.sigmoid(x)
Example #10
    def forward(self, x):
        with tf.name_scope("forward"):
            out = tfe.matmul(x, self.w) + self.b
            y_hat = tfe.sigmoid(out)
            return y_hat

    def forward(self, x):
        y = tfe.sigmoid(x)
        self.layer_output = y
        return y
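
Caching self.layer_output is what makes a cheap backward pass possible, since sigmoid'(x) = y * (1 - y) can be written in terms of the output itself. A plaintext sketch of the full pattern (the backward method is an assumption, not part of the original snippet):

import numpy as np

class SigmoidLayer:
    def forward(self, x):
        self.layer_output = 1.0 / (1.0 + np.exp(-x))
        return self.layer_output

    def backward(self, d_y):
        y = self.layer_output          # reuse the cached forward output
        return d_y * y * (1.0 - y)     # sigmoid'(x) = y * (1 - y)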