Example #1
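    # Assumed imports for this test (not shown in the snippet):
    #   import numpy as np
    #   import tensorflow as tf
    #   import tf_encrypted as tfe
    #   from tf_encrypted.protocol.aby3 import ABY3  # import path assumed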
    def test_reduce_sum(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        x = tfe.define_private_variable(tf.constant([[1, 2, 3], [4, 5, 6]]))
        y = tfe.define_constant(np.array([[1, 2, 3], [4, 5, 6]]))

        z1 = x.reduce_sum(axis=1, keepdims=True)
        z2 = tfe.reduce_sum(y, axis=0, keepdims=False)

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z1.reveal())
            np.testing.assert_allclose(result,
                                       np.array([[6], [15]]),
                                       rtol=0.0,
                                       atol=0.01)

            result = sess.run(z2)
            np.testing.assert_allclose(result,
                                       np.array([5, 7, 9]),
                                       rtol=0.0,
                                       atol=0.01)
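The loose atol=0.01 tolerance is deliberate: ABY3 computes on fixed-point
encodings of the values, so revealed results can differ from the exact sums
by a small rounding error.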
Example #2
    def backward(self, x, dy, learning_rate=0.01):
        batch_size = x.shape.as_list()[0]
        with tf.name_scope("backward"):
            # gradients of the linear layer: dw = x^T dy, db = per-column sums of dy
            dw = tfe.matmul(tfe.transpose(x), dy) / batch_size
            db = tfe.reduce_sum(dy, axis=0) / batch_size
            # plain SGD update on the secret-shared parameters
            assign_ops = [
                tfe.assign(self.w, self.w - dw * learning_rate),
                tfe.assign(self.b, self.b - db * learning_rate),
            ]
            return assign_ops
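
A minimal usage sketch (the enclosing object, here called model, is hypothetical;
x and y are private tensors as in the other examples):

out = tfe.matmul(x, model.w) + model.b
y_hat = tfe.sigmoid(out)
dy = y_hat - y                      # loss gradient w.r.t. the logits
assign_ops = model.backward(x, dy)  # one SGD update of w and b
with tfe.Session() as sess:
    sess.run(tfe.global_variables_initializer())
    sess.run(assign_ops)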
Example #3
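# Assumed imports (not shown): tensorflow as tf, tf_encrypted as tfe,
# and ABY3 from tf_encrypted's protocol module.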
def test_simple_lr_model():
    tf.reset_default_graph()

    import time
    start = time.time()
    prot = ABY3()
    tfe.set_protocol(prot)

    # define inputs
    x_raw = tf.random.uniform(minval=-0.5,
                              maxval=0.5,
                              shape=[99, 10],
                              seed=1000)
    x = tfe.define_private_variable(x_raw, name="x")
    y_raw = tf.cast(tf.reduce_mean(x_raw, axis=1, keepdims=True) > 0,
                    dtype=tf.float32)
    y = tfe.define_private_variable(y_raw, name="y")
    w = tfe.define_private_variable(tf.random.uniform([10, 1],
                                                      -0.01,
                                                      0.01,
                                                      seed=100),
                                    name="w")
    b = tfe.define_private_variable(tf.zeros([1]), name="b")
    learning_rate = 0.01

    with tf.name_scope("forward"):
        out = tfe.matmul(x, w) + b
        y_hat = tfe.sigmoid(out)

    with tf.name_scope("loss-grad"):
        dy = y_hat - y
    batch_size = x.shape.as_list()[0]
    with tf.name_scope("backward"):
        dw = tfe.matmul(tfe.transpose(x), dy) / batch_size
        db = tfe.reduce_sum(dy, axis=0) / batch_size
        upd1 = dw * learning_rate
        upd2 = db * learning_rate
        assign_ops = [tfe.assign(w, w - upd1), tfe.assign(b, b - upd2)]

    with tfe.Session() as sess:
        # initialize variables
        sess.run(tfe.global_variables_initializer())
        for i in range(1):  # a single SGD step; raise the count for real training
            sess.run(assign_ops)

        print(sess.run(w.reveal()))
    end = time.time()
    print("Elapsed time: {} seconds".format(end - start))
Example #4
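# Assumes tf and tfe are imported, and that gen_training_input / gen_test_input,
# training_set_size, nb_feats, batch_size, and learning_rate are defined earlier
# in the surrounding script.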
xp, yp = tfe.define_private_input(
    'input-provider',
    lambda: gen_training_input(training_set_size, nb_feats, batch_size))
xp_test, yp_test = tfe.define_private_input(
    'input-provider',
    lambda: gen_test_input(training_set_size, nb_feats, batch_size))

W = tfe.define_private_variable(tf.random_uniform([nb_feats, 1], -0.01, 0.01))
b = tfe.define_private_variable(tf.zeros([1]))

# Training model
out = tfe.matmul(xp, W) + b
pred = tfe.sigmoid(out)
# tfe provides no log approximation, so the cross-entropy cost would have to be
# computed in plaintext (e.g. numpy) instead:
# cost = -tfe.sum(y * tfe.log(pred) + (1 - y) * tfe.log(1 - pred)) * (1/train_batch_size)

# Backprop
dc_dout = pred - yp
dW = tfe.matmul(tfe.transpose(xp), dc_dout) * (1 / batch_size)
db = tfe.reduce_sum(1. * dc_dout, axis=0) * (1 / batch_size)
ops = [
    tfe.assign(W, W - dW * learning_rate),
    tfe.assign(b, b - db * learning_rate)
]

# Testing model
pred_test = tfe.sigmoid(tfe.matmul(xp_test, W) + b)


def print_accuracy(pred_test_tf, y_test_tf: tf.Tensor) -> tf.Operation:
    correct_prediction = tf.equal(tf.round(pred_test_tf), y_test_tf)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return tf.print("Accuracy", accuracy)
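
print_accuracy is defined but never wired up in this snippet. One way to connect
it, assuming tf-encrypted's tfe.define_output (which reveals the listed private
tensors to the named player and runs the callback on the resulting plaintext
tensors):

print_op = tfe.define_output('input-provider',
                             [pred_test, yp_test],
                             print_accuracy)

with tfe.Session() as sess:
    sess.run(tfe.global_variables_initializer())
    sess.run(ops)        # one training step; loop as needed
    sess.run(print_op)   # decode test predictions and print accuracy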

Example #5
    def loss(self, y_hat, y, batch_size):
        """Compute L2 loss."""
        with tf.name_scope("loss"):
            # variant with the conventional 0.5 factor:
            # loss_val = 0.5 * tfe.reduce_sum(tfe.square(y_hat - y)) / batch_size
            loss_val = tfe.reduce_sum(tfe.square(y_hat - y)) / batch_size
            return loss_val
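
Note how this ties to the backward pass in Example #2: with the commented-out
0.5 factor, the gradient of the loss w.r.t. y_hat is (y_hat - y) / batch_size,
exactly the dy fed into backward (the batch_size division happens inside
backward itself). Without the 0.5, the true gradient would be
2 * (y_hat - y) / batch_size, so the update in Example #2 matches the 0.5
variant of the loss.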