Example #1
    def compute_auc(self, sess, x, y, data_owner):
        """ Compute AUC """
        def print_auc(y_hat, y_in):
            with tf.name_scope("print_auc"):
                auc, update_auc_op = tf.metrics.auc(y_in, y_hat)
                op = tf.print("AUC on {}:".format(data_owner.player_name),
                              auc,
                              type(y_in),
                              type(y_hat),
                              summarize=6)
                return op

        with tf.name_scope("auc"):
            y_p = self.forward(x)
            print_auc_op = tfe.define_output(data_owner.player_name, [y_p, y],
                                             print_auc)
        print(
            f'local variables: {tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES)}'
        )
        sess.run(tf.local_variables_initializer())
        sess.run(print_auc_op)

        y_pred = sess.run(y_p.reveal())
        # print(type(y_pred), y_pred.shape, f'y pred: {y_pred}')
        y_true = sess.run(y.reveal())
        # print(type(y_true), y_true.shape, f'y true: {y_true}')
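        # assumes `import numpy as np` and `from sklearn.metrics import roc_auc_score`
        # at module level (not shown in this fragment)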
        acc = np.isclose(np.around(y_pred), y_true)
        print(f'Acc {np.mean(acc)}')
        print(f'AUC {roc_auc_score(y_true, y_pred)}')
        return y_pred, y_true
Example #2
    def loss(self, sess, x, y, player_name):
        def print_loss(y_hat, y):
            with tf.name_scope("print-loss"):
                loss = -y * tf.log(y_hat) - (1 - y) * tf.log(1 - y_hat)
                print_op = tf.print("Loss on {}:".format(player_name), loss)
                return print_op

        with tf.name_scope("loss"):
            y_hat = self.forward(x)
            print_loss_op = tfe.define_output(player_name, [y_hat, y],
                                              print_loss)
        sess.run(print_loss_op, tag="loss")
Example #3
def data_reveal(sess, data_owner, data):
    """ Print data value with tf.print """

    # @tfe.local_computation
    def _print(x):
        op = tf.print('Value on {}'.format(data_owner.player_name),
                      x,
                      summarize=-1)
        return op

    reveal_op = tfe.define_output(data_owner.player_name, [data], _print)
    sess.run(reveal_op)
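
The commented-out `@tfe.local_computation` above hints at the decorator-style alternative to `tfe.define_output`. A minimal sketch of that variant, assuming a tf_encrypted release that ships the `tfe.local_computation` decorator and that `tensorflow` is imported as `tf`:

def data_reveal_decorated(sess, data_owner, data):
    """ Same printing, expressed as a local computation on the data owner """

    @tfe.local_computation(data_owner.player_name)
    def _print(x):
        # x arrives as a plain tf.Tensor on the owner's device
        return tf.print('Value on {}'.format(data_owner.player_name),
                        x,
                        summarize=-1)

    sess.run(_print(data))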
Example #4
    def loss(self, model, x, y):
        def print_loss(y_hat, y):
            with tf.name_scope("print-loss"):
                loss = -y * tf.log(y_hat) - (1 - y) * tf.log(1 - y_hat)
                loss = tf.reduce_mean(loss)
                print_op = tf.print("Loss on {}:".format(self.player_name),
                                    loss)
                return print_op

        with tf.name_scope("loss"):
            y_hat = model.forward(x)
            print_loss_op = tfe.define_output(self.player_name, [y_hat, y],
                                              print_loss)
        return print_loss_op
Example #5
    def evaluate(self, sess, x, y, data_owner):
        def print_accuracy(y_hat, y) -> tf.Operation:
            with tf.name_scope("print-accuracy"):
                correct_prediction = tf.equal(tf.round(y_hat), y)
                accuracy = tf.reduce_mean(
                    tf.cast(correct_prediction, tf.float32))
                print_op = tf.print(
                    "Accuracy on {}:".format(data_owner.player_name), accuracy)
                return print_op

        with tf.name_scope("evaluate"):
            y_hat = self.forward(x)
            print_accuracy_op = tfe.define_output(data_owner.player_name,
                                                  [y_hat, y], print_accuracy)

        sess.run(print_accuracy_op, tag='evaluate')
Example #6
x_train_0, y_train_0 = tfe.define_private_input(
    data_owner_0.player_name, data_owner_0.provide_training_data)
x_train_1, y_train_1 = tfe.define_private_input(
    data_owner_1.player_name, data_owner_1.provide_training_data)

x_test_0, y_test_0 = tfe.define_private_input(
    data_owner_0.player_name, data_owner_0.provide_testing_data)
x_test_1, y_test_1 = tfe.define_private_input(
    data_owner_1.player_name, data_owner_1.provide_testing_data)

x_train = tfe.concat([x_train_0, x_train_1], axis=0)
y_train = tfe.concat([y_train_0, y_train_1], axis=0)

model = LogisticRegression(num_features)
reveal_weights_op = tfe.define_output(model_owner.player_name, model.weights,
                                      model_owner.receive_weights)

with tfe.Session() as sess:
    sess.run([
        tfe.global_variables_initializer(), data_owner_0.initializer,
        data_owner_1.initializer
    ],
             tag='init')

    model.fit(sess, x_train, y_train, num_batches)
    # TODO(Morten)
    # each evaluation results in nodes for a forward pass being added to the graph;
    # maybe there's some way to avoid this, even if it means only if the shapes match
    model.evaluate(sess, x_test_0, y_test_0, data_owner_0)
    model.evaluate(sess, x_test_1, y_test_1, data_owner_1)
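
`DataOwner` here comes from the example's `common` module, which is not shown. A hypothetical stand-in consistent with how it is used (a `player_name`, `provide_training_data`/`provide_testing_data` callbacks, and an `initializer` op), assuming `import tensorflow as tf` and random data in place of a real private dataset:

class DataOwner:
    """Hypothetical stand-in; names and shapes are assumptions, not the original common.py."""

    def __init__(self, player_name, num_features, batch_size=100):
        self.player_name = player_name
        self.num_features = num_features
        self.batch_size = batch_size
        # nothing to initialize in this sketch; a real owner might initialize a dataset iterator here
        self.initializer = tf.no_op()

    def provide_training_data(self):
        # random features/labels as a stand-in for the owner's private training set
        x = tf.random_normal((self.batch_size, self.num_features))
        y = tf.cast(tf.reduce_mean(x, axis=1, keepdims=True) > 0, tf.float32)
        return x, y

    def provide_testing_data(self):
        return self.provide_training_data()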
Example #7
# get prediction input from client
x, y = tfe.define_private_input('prediction-client', prediction_client.provide_input, masked=True)  # pylint: disable=E0632

# helpers
conv = lambda x, w, s: tfe.conv2d(x, w, s, 'VALID')
pool = lambda x: tfe.avgpool2d(x, (2, 2), (2, 2), 'VALID')

# compute prediction
Wconv1, bconv1, Wfc1, bfc1, Wfc2, bfc2 = params
bconv1 = tfe.reshape(bconv1, [-1, 1, 1])
layer1 = pool(tfe.relu(conv(x, Wconv1, ModelTrainer.STRIDE) + bconv1))
layer1 = tfe.reshape(layer1, [-1, ModelTrainer.HIDDEN_FC1])
layer2 = tfe.matmul(layer1, Wfc1) + bfc1
logits = tfe.matmul(layer2, Wfc2) + bfc2

# send prediction output back to client
prediction_op = tfe.define_output('prediction-client', [logits, y], prediction_client.receive_output)


with tfe.Session() as sess:
    print("Init")
    sess.run(tf.global_variables_initializer(), tag='init')

    print("Training")
    sess.run(tfe.global_caches_updater(), tag='training')

    for _ in range(5):
        print("Predicting")
        sess.run(prediction_op, tag='prediction')
Example #8

def provide_input() -> tf.Tensor:
    # pick random tensor to be averaged
    return tf.random_normal(shape=(10, ))


if __name__ == '__main__':
    # get input from inputters as private values
    inputs = [
        tfe.define_private_input('inputter-0', provide_input),
        tfe.define_private_input('inputter-1', provide_input),
        tfe.define_private_input('inputter-2', provide_input),
        tfe.define_private_input('inputter-3', provide_input),
        tfe.define_private_input('inputter-4', provide_input),
    ]

    # sum all inputs and divide by count
    result = tfe.add_n(inputs) / len(inputs)

    def receive_output(average: tf.Tensor) -> tf.Operation:
        # simply print average
        return tf.print("Average:", average)

    # send result to receiver
    result_op = tfe.define_output('result-receiver', result, receive_output)

    # run a few times
    with tfe.Session() as sess:
        sess.run(result_op, tag='average')
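
For the player names used above ('inputter-0' through 'inputter-4' and 'result-receiver', plus the compute servers) to map to actual parties, a configuration is normally registered before building the graph. A local-simulation sketch, assuming the `tfe.LocalConfig`/`tfe.set_config` API and the default Pond player names:

players = ['server0', 'server1', 'crypto-producer',
           'inputter-0', 'inputter-1', 'inputter-2', 'inputter-3', 'inputter-4',
           'result-receiver']
config = tfe.LocalConfig(players)  # run every player inside one process, for testing
tfe.set_config(config)
tfe.set_protocol(tfe.protocol.Pond())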
Example #9
    cache_updater, params = tfe.cache(params)

    # get prediction input from client
    x = tfe.define_private_input(prediction_client.player_name,
                                 prediction_client.provide_input,
                                 masked=True)  # pylint: disable=E0632

    # compute prediction
    w0, b0, w1, b1 = params
    layer0 = tfe.matmul(x, w0) + b0
    layer1 = tfe.sigmoid(layer0 * 0.1)  # input normalized to avoid large values
    logits = tfe.matmul(layer1, w1) + b1

    # send prediction output back to client
    prediction_op = tfe.define_output(prediction_client.player_name, logits,
                                      prediction_client.receive_output)

    with tfe.Session(target=session_target) as sess:

        sess.run(tf.global_variables_initializer(), tag='init')

        print("Training")
        sess.run(cache_updater, tag='training')

        for _ in range(10):
            print("Predicting")
            sess.run(prediction_op, tag='prediction')
        end = time.time()
        print("Elapsed time: ", end - start)
"""Private prediction with a single clients"""
import tf_encrypted as tfe

from common import LogisticRegression, PredictionClient

num_features = 10

model = LogisticRegression(num_features)
prediction_client = PredictionClient('prediction-client', num_features)

x = tfe.define_private_input(prediction_client.player_name,
                             prediction_client.provide_input)

y = model.forward(x)

reveal_output = tfe.define_output(prediction_client.player_name, y,
                                  prediction_client.receive_output)

with tfe.Session() as sess:
    sess.run(tfe.global_variables_initializer(), tag='init')

    sess.run(reveal_output, tag='predict')
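
This example and Example #13 import `LogisticRegression` and `PredictionClient` from a `common` module that is not reproduced here. A hypothetical minimal `PredictionClient`, consistent with how it is used (a `player_name`, a `provide_input` method, and a `receive_output` callback) and assuming `import tensorflow as tf`:

class PredictionClient:
    """Hypothetical stand-in for common.PredictionClient; details are assumptions."""

    def __init__(self, player_name, num_features):
        self.player_name = player_name
        self.num_features = num_features

    def provide_input(self) -> tf.Tensor:
        # random features as a stand-in for the client's private prediction input
        return tf.random_normal(shape=(1, self.num_features))

    def receive_output(self, likelihood: tf.Tensor) -> tf.Operation:
        return tf.print("Prediction on {}:".format(self.player_name), likelihood)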
Example #11
ops = [
    tfe.assign(W, W - dW * learning_rate),
    tfe.assign(b, b - db * learning_rate)
]

# Testing model
pred_test = tfe.sigmoid(tfe.matmul(xp_test, W) + b)


def print_accuracy(pred_test_tf, y_test_tf: tf.Tensor) -> tf.Operation:
    correct_prediction = tf.equal(tf.round(pred_test_tf), y_test_tf)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return tf.print("Accuracy", accuracy)


print_acc_op = tfe.define_output('input-provider', [pred_test, yp_test], print_accuracy)

total_batch = training_set_size // batch_size
with tfe.Session() as sess:
    sess.run(tfe.global_variables_initializer(), tag='init')

    for epoch in range(training_epochs):
        avg_cost = 0.

        for i in range(total_batch):
            _, y_out, p_out = sess.run([ops, yp.reveal(), pred.reveal()], tag='optimize')
            # Our sigmoid function is an approximation and can produce values
            # outside the range [0, 1]; clamp them and add/subtract an epsilon
            # before computing the cost
            p_out = p_out * (p_out > 0) + 0.001
            p_out = p_out * (p_out < 1) + (p_out >= 1) * 0.999
            c = -np.mean(y_out * np.log(p_out) + (1 - y_out) * np.log(1 - p_out))
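
The two masking lines above keep the approximated sigmoid output strictly inside (0, 1) before the logs are taken; a roughly equivalent and arguably clearer formulation, assuming numpy is imported as np, is:

p_out = np.clip(p_out, 0.001, 0.999)
c = -np.mean(y_out * np.log(p_out) + (1 - y_out) * np.log(1 - p_out))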
Example #12
        DataOwner("data-owner-0", "./data/train.tfrecord",
                  model_owner.build_update_step),
        DataOwner("data-owner-1", "./data/train.tfrecord",
                  model_owner.build_update_step),
        DataOwner("data-owner-2", "./data/train.tfrecord",
                  model_owner.build_update_step),
    ]

    model_grads = zip(*(tfe.define_private_input(data_owner.player_name,
                                                 data_owner.compute_gradient)
                        for data_owner in data_owners))

    with tf.name_scope('secure_aggregation'):
        aggregated_model_grads = [
            tfe.add_n(grads) / len(grads) for grads in model_grads
        ]

    iteration_op = tfe.define_output(model_owner.player_name,
                                     aggregated_model_grads,
                                     model_owner.update_model)

    with tfe.Session(target=session_target) as sess:
        sess.run(tf.global_variables_initializer(), tag='init')

        for i in range(model_owner.ITERATIONS):
            if i % 100 == 0:
                print("Iteration {}".format(i))
                sess.run(iteration_op, tag='iteration')
            else:
                sess.run(iteration_op)
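
The `zip(*(...))` in the secure-aggregation block transposes a per-owner sequence of gradient tuples into per-variable groups, so each group can be averaged on its own. A plain-Python illustration of that reshaping:

per_owner = [('gW_0', 'gb_0'), ('gW_1', 'gb_1'), ('gW_2', 'gb_2')]  # one tuple per data owner
per_variable = list(zip(*per_owner))
# [('gW_0', 'gW_1', 'gW_2'), ('gb_0', 'gb_1', 'gb_2')]  -> one group per model variable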
Example #13
import tf_encrypted as tfe

from common import LogisticRegression, PredictionClient

num_features = 10

model = LogisticRegression(num_features)
prediction_client_0 = PredictionClient('prediction-client-0',
                                       num_features // 2)
prediction_client_1 = PredictionClient('prediction-client-1',
                                       num_features // 2)
result_receiver = prediction_client_0

x_0 = tfe.define_private_input(prediction_client_0.player_name,
                               prediction_client_0.provide_input)
x_1 = tfe.define_private_input(prediction_client_1.player_name,
                               prediction_client_1.provide_input)
x = tfe.concat([x_0, x_1], axis=1)

y = model.forward(x)

reveal_output = tfe.define_output(result_receiver.player_name, y,
                                  result_receiver.receive_output)

with tfe.Session() as sess:
    sess.run(tfe.global_variables_initializer(), tag='init')

    sess.run(reveal_output, tag='predict')