Example #1
def main(server):

    num_rows = 7000
    num_features = 32
    num_epoch = 5
    batch_size = 200
    num_batches = (num_rows // batch_size) * num_epoch

    # who shall receive the output (the trained weights)
    model_owner = ModelOwner('alice')

    data_schema0 = DataSchema([tf.float64] * 16, [0.0] * 16)
    data_schema1 = DataSchema([tf.int64] + [tf.float64] * 16, [0] + [0.0] * 16)
    data_owner_0 = DataOwner('alice',
                             'aliceTrainFile.csv',
                             data_schema0,
                             batch_size=batch_size)
    data_owner_1 = DataOwner('bob',
                             'bobTrainFileWithLabel.csv',
                             data_schema1,
                             batch_size=batch_size)

    tfe.set_protocol(
        tfe.protocol.Pond(
            tfe.get_config().get_player(data_owner_0.player_name),
            tfe.get_config().get_player(data_owner_1.player_name)))

    x_train_0 = tfe.define_private_input(data_owner_0.player_name,
                                         data_owner_0.provide_data)
    x_train_1 = tfe.define_private_input(data_owner_1.player_name,
                                         data_owner_1.provide_data)
    y_train = tfe.gather(x_train_1, 0, axis=1)
    y_train = tfe.reshape(y_train, [batch_size, 1])

    # Remove bob's first column (which is the label)
    x_train_1 = tfe.strided_slice(x_train_1, [0, 1],
                                  [x_train_1.shape[0], x_train_1.shape[1]],
                                  [1, 1])

    x_train = tfe.concat([x_train_0, x_train_1], axis=1)

    model = LogisticRegression(num_features)
    reveal_weights_op = model_owner.receive_weights(model.weights)
    with tfe.Session() as sess:
        sess.run(tfe.global_variables_initializer(), tag='init')
        start_time = time.time()
        model.fit(sess, x_train, y_train, num_batches)
        end_time = time.time()
        # TODO(Morten)
        # each evaluation adds nodes for a forward pass to the graph;
        # maybe there is a way to avoid this, even if it only works when the shapes match
        model.evaluate(sess, x_train, y_train, data_owner_0)
        model.evaluate(sess, x_train, y_train, data_owner_1)

        print(sess.run(reveal_weights_op, tag='reveal'),
              ((end_time - start_time) * 1000))
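
The `DataOwner.provide_data` callable handed to `tfe.define_private_input` above is not included in this excerpt. A rough sketch of such a provider, assuming an all-float CSV file and the TF 1.x `tf.data` API (the helper name and parameters below are illustrative, not the library's own code):

import tensorflow as tf

def make_provide_data(filename, record_defaults, batch_size):
    # Runs as ordinary TensorFlow on the data owner's machine; the returned
    # callable yields one local batch per call, which TFE then secret-shares.
    def provide_data():
        dataset = tf.data.TextLineDataset(filename)
        dataset = dataset.map(
            lambda line: tf.stack(tf.decode_csv(line, record_defaults)))
        dataset = dataset.repeat().batch(batch_size)
        return dataset.make_one_shot_iterator().get_next()
    return provide_data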
Example #2
    def test_add_private_private(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            # normal TensorFlow operations can be run locally
            # as part of defining a private input, in this
            # case on the machine of the input provider
            return tf.ones(shape=(2, 2)) * 1.3

        # define inputs
        x = tfe.define_private_variable(tf.ones(shape=(2, 2)))
        y = tfe.define_private_input('input-provider', provide_input)

        # define computation
        z = x + y

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z.reveal())
            # Should be [[2.3, 2.3], [2.3, 2.3]]
            np.testing.assert_allclose(result,
                                       np.array([[2.3, 2.3], [2.3, 2.3]]),
                                       rtol=0.0,
                                       atol=0.01)
            print("test_add_private_private succeeds")
Example #3
    def test_iterate_private(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            return tf.reshape(tf.range(0, 8), [4, 2])

        # define inputs
        x = tfe.define_private_input('input-provider', provide_input)

        write_op = x.write("x.tfrecord")

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # write the secret shares to file
            sess.run(write_op)

        x = tfe.read("x.tfrecord", batch_size=5, n_columns=2)
        y = tfe.iterate(x, batch_size=3, repeat=True, shuffle=False)
        z = tfe.iterate(x, batch_size=3, repeat=True, shuffle=True)
        with tfe.Session() as sess:
            sess.run(tfe.global_variables_initializer())
            print(sess.run(x.reveal()))
            print(sess.run(y.reveal()))
            print(sess.run(y.reveal()))
            print(sess.run(x.reveal()))
            print(sess.run(z.reveal()))
Example #4
    def test_read_private(self):

        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            return tf.reshape(tf.range(0, 8), [4, 2])

        # define inputs
        x = tfe.define_private_input('input-provider', provide_input)

        write_op = x.write("x.tfrecord")

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # write the secret shares to file
            sess.run(write_op)

        x = tfe.read("x.tfrecord", batch_size=5, n_columns=2)
        with tfe.Session() as sess:
            result = sess.run(x.reveal())
            np.testing.assert_allclose(result,
                                       np.array(list(range(0, 8)) +
                                                [0, 1]).reshape([5, 2]),
                                       rtol=0.0,
                                       atol=0.01)
            print("test_read_private succeeds")
Example #5
    def test_mul_trunc2_private_private(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            # normal TensorFlow operations can be run locally
            # as part of defining a private input, in this
            # case on the machine of the input provider
            return tf.ones(shape=(2, 2)) * 1.3

        # define inputs
        x = tfe.define_private_variable(tf.ones(shape=(2, 2)) * 2)
        y = tfe.define_private_input("input-provider", provide_input)

        # define computation
        z = tfe.mul_trunc2(x, y)

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z.reveal(), tag="mul_trunc2")
            np.testing.assert_allclose(
                result, np.array([[2.6, 2.6], [2.6, 2.6]]), rtol=0.0, atol=0.01
            )
Example #6
    def test_fifo(self):

        shape = (10, 10)

        with tfe.protocol.Pond():

            q = tfe.queue.FIFOQueue(capacity=10, shape=shape)
            assert isinstance(q, tfe.protocol.pond.AdditiveFIFOQueue)

            raw = np.full(shape, 5)

            x = tfe.define_private_input("inputter", lambda: tf.convert_to_tensor(raw))
            assert isinstance(x, tfe.protocol.pond.PondPrivateTensor)

            enqueue_op = q.enqueue(x)

            y = q.dequeue()
            assert isinstance(y, tfe.protocol.pond.PondPrivateTensor)
            assert y.backing_dtype == x.backing_dtype
            assert y.shape == x.shape

        with tfe.Session() as sess:
            sess.run(enqueue_op)
            res = sess.run(y.reveal())

            np.testing.assert_array_equal(res, raw)
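
Since the additive queue is backed by an ordinary TF 1.x queue, the same enqueue and dequeue ops can presumably be run repeatedly, e.g. (a sketch under that assumption):

        # Sketch: each run of enqueue_op adds one secret-shared tensor to the
        # queue, and each run of the dequeue's reveal reconstructs one element.
        y_revealed = y.reveal()
        with tfe.Session() as sess:
            for _ in range(3):
                sess.run(enqueue_op)
            for _ in range(3):
                np.testing.assert_array_equal(sess.run(y_revealed), raw)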
Example #7
    def test_fp_sqrt_private(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)
        r = np.random.rand(16) * 100  # random in [0, 100)

        # define inputs
        x = tfe.define_private_input("input-provider", lambda: tf.constant(r))

        # define computation
        sqrt_x = prot.fp_sqrt(x).reveal()
        sqrt_inv_x = prot.fp_sqrt_inv(x).reveal()

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            sqrt_x, sqrt_inv_x = sess.run([sqrt_x, sqrt_inv_x], tag="fp_sqrt")
            np.testing.assert_allclose(sqrt_x,
                                       np.sqrt(r),
                                       rtol=0.03,
                                       atol=0.05)
            np.testing.assert_allclose(sqrt_inv_x,
                                       1. / np.sqrt(r),
                                       rtol=0.03,
                                       atol=0.05)
Example #8
    def _register_op(self, node, inputs_iterable, input_player, graph_def):
        """Register single ops."""
        output = strip_tensor_info(node.name)
        if node.op == "Placeholder":
            try:
                _, item = inputs_iterable.__next__()
            except StopIteration:
                raise InvalidArgumentError("Not enough placeholders supplied")

            x = tfe.define_private_input(input_player, item)
            self.outputs[output] = x
            return

        out = self.registry[node.op](self, node, node.input)

        # if the operation returns a list or tuple with several outputs,
        # identify the output node names
        if isinstance(out, (list, tuple)):
            output_name = find_output_names(graph_def, node.name, len(out))
            # If output_name is empty, it means this node
            # is the last one in the graph
            if not output_name:
                self.outputs[output] = out
            else:
                for i, _ in enumerate(out):
                    self.outputs[output_name[i]] = out[i]
        else:
            self.outputs[output] = out
Example #9
    def test_read_private(self):

        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            return tf.reshape(tf.range(0, 8), [4, 2])

        # define inputs
        x = tfe.define_private_input("input-provider", provide_input)

        _, tmp_filename = tempfile.mkstemp()
        write_op = x.write(tmp_filename)

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # write the secret shares to file
            sess.run(write_op)

        x = tfe.read(tmp_filename, batch_size=5, n_columns=2)
        with tfe.Session() as sess:
            result = sess.run(x.reveal())
            np.testing.assert_allclose(
                result,
                np.array(list(range(0, 8)) + [0, 1]).reshape([5, 2]),
                rtol=0.0,
                atol=0.01,
            )

        os.remove(tmp_filename)
Example #10
  def test_depthwise_conv2d_set_weights(self):
    input_shape = (1, 10, 10, 3)
    input_data = np.random.normal(size=input_shape)

    with tf.Session():
      model = tf.keras.models.Sequential()

      model.add(tf.keras.layers.DepthwiseConv2D(kernel_size=(2, 2),
                                                batch_input_shape=input_shape))

      expected = model.predict(input_data)
      k_weights = model.get_weights()
      k_config = model.get_config()

    with tfe.protocol.SecureNN():
      x = tfe.define_private_input(
          "inputter",
          lambda: tf.convert_to_tensor(input_data))

      tfe_model = tfe.keras.models.model_from_config(k_config)
      tfe_model.set_weights(k_weights)
      y = tfe_model(x)

    with KE.get_session() as sess:
      actual = sess.run(y.reveal())

      np.testing.assert_allclose(actual, expected, rtol=1e-2, atol=1e-2)

    KE.clear_session()
Example #11
    def test_weights_as_private_var(self):
        input_shape = (1, 3)
        input_data = np.random.normal(size=input_shape)
        expected, k_weights, k_config = _model_predict_keras(
            input_data, input_shape)

        with tfe.protocol.SecureNN():
            x = tfe.define_private_input(
                "inputter", lambda: tf.convert_to_tensor(input_data))

            tfe_model = tfe.keras.models.model_from_config(k_config)
            weights_private_var = [
                tfe.define_private_variable(w) for w in k_weights
            ]

            with tfe.Session() as sess:
                for w in weights_private_var:
                    sess.run(w.initializer)

                tfe_model.set_weights(weights_private_var, sess)
                y = tfe_model(x)

                actual = sess.run(y.reveal())

                np.testing.assert_allclose(actual,
                                           expected,
                                           rtol=1e-2,
                                           atol=1e-3)
Example #12
    def test_iterate_private(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            return tf.reshape(tf.range(0, 8), [4, 2])

        # define inputs
        x = tfe.define_private_input("input-provider", provide_input)

        _, tmp_filename = tempfile.mkstemp()
        write_op = x.write(tmp_filename)

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # write the secret shares to file
            sess.run(write_op)

        x = tfe.read(tmp_filename, batch_size=5, n_columns=2)
        y = tfe.iterate(x, batch_size=3, repeat=True, shuffle=False)
        z = tfe.iterate(x, batch_size=3, repeat=True, shuffle=True)
        with tfe.Session() as sess:
            sess.run(tfe.global_variables_initializer())
            # TODO: fix this test
            print(sess.run(x.reveal()))
            print(sess.run(y.reveal()))
            print(sess.run(y.reveal()))
            print(sess.run(x.reveal()))
            print(sess.run(z.reveal()))

        os.remove(tmp_filename)
Example #13
def _matmul(converter, node: Any, inputs: List[str]) -> Any:
  a = converter.outputs[inputs[0]]
  b = converter.outputs[inputs[1]]

  tensor = b.attr["value"].tensor

  b_shape = [i.size for i in tensor.tensor_shape.dim]

  transpose_a = node.attr["transpose_a"].b
  transpose_b = node.attr["transpose_b"].b

  layer = Dense(a.shape.as_list(),
                b_shape[1],
                transpose_input=transpose_a,
                transpose_weight=transpose_b)

  dtype = tensor.dtype

  if dtype == tf.float32:
    nums = array.array('f', tensor.tensor_content)
  elif dtype == tf.float64:
    nums = array.array('d', tensor.tensor_content)
  else:
    raise TypeError("Unsupported dtype for weights")

  def inputter_fn():
    return tf.constant(np.array(nums).reshape(b_shape))

  w = tfe.define_private_input(converter.model_provider, inputter_fn)

  layer.initialize(initial_weights=w)

  return layer.forward(a)
Example #14
    def test_write_private(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            # normal TensorFlow operations can be run locally
            # as part of defining a private input, in this
            # case on the machine of the input provider
            return tf.ones(shape=(2, 2)) * 1.3

        # define inputs
        x = tfe.define_private_input("input-provider", provide_input)

        _, tmp_filename = tempfile.mkstemp()
        write_op = x.write(tmp_filename)

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # write the secret shares to file
            sess.run(write_op)

        os.remove(tmp_filename)
Example #15
def test_error():
    tf.reset_default_graph()

    prot = ABY3()
    tfe.set_protocol(prot)

    a = tf.random_uniform([6, 10000], -3, 3)
    b = tf.random_uniform([10000, 1], -0.1, 0.1)
    x = tfe.define_private_input("input-provider", lambda: a)
    y = tfe.define_private_input("input-provider", lambda: b)

    z = tfe.matmul(x, y)
    w = tf.matmul(a, b)

    with tfe.Session() as sess:
        sess.run(tfe.global_variables_initializer())
        for i in range(1000):
            result1, result2 = sess.run([z.reveal(), w])
            close(result1, result2, 0.1)
        print("test_error succeeds")
Example #16
    def test_from_config(self):
        input_shape = (1, 3)
        input_data = np.random.normal(size=input_shape)
        expected, k_weights, k_config = _model_predict_keras(
            input_data, input_shape)

        with tfe.protocol.SecureNN():
            x = tfe.define_private_input(
                "inputter", lambda: tf.convert_to_tensor(input_data))

            tfe_model = Sequential.from_config(k_config)
            tfe_model.set_weights(k_weights)
            y = tfe_model(x)

        with KE.get_session() as sess:
            actual = sess.run(y.reveal())

            np.testing.assert_allclose(actual, expected, rtol=1e-2, atol=1e-3)

        KE.clear_session()
Example #17
    def test_conv_model(self):

        num_classes = 10
        input_shape = (1, 28, 28, 1)
        input_data = np.random.normal(size=input_shape)

        with tf.Session():
            model = tf.keras.models.Sequential()

            model.add(
                tf.keras.layers.Conv2D(2, (3, 3),
                                       batch_input_shape=input_shape))
            model.add(tf.keras.layers.ReLU())
            model.add(tf.keras.layers.BatchNormalization())
            model.add(tf.keras.layers.AveragePooling2D((2, 2)))
            model.add(tf.keras.layers.Conv2D(2, (3, 3)))
            model.add(tf.keras.layers.ReLU())
            model.add(tf.keras.layers.BatchNormalization())
            model.add(tf.keras.layers.AveragePooling2D((2, 2)))
            model.add(tf.keras.layers.Flatten())
            model.add(tf.keras.layers.Dense(num_classes, name="logit"))

            expected = model.predict(input_data)
            k_weights = model.get_weights()
            k_config = model.get_config()

        with tfe.protocol.SecureNN():
            x = tfe.define_private_input(
                "inputter", lambda: tf.convert_to_tensor(input_data))

            tfe_model = tfe.keras.models.model_from_config(k_config)

            tfe_model.set_weights(k_weights)
            y = tfe_model(x)

        with KE.get_session() as sess:
            actual = sess.run(y.reveal())

            np.testing.assert_allclose(actual, expected, rtol=1e-2, atol=1e-2)

        KE.clear_session()
Example #18
def test_sub_private_private():
    tf.reset_default_graph()

    prot = ABY3()
    tfe.set_protocol(prot)

    def provide_input():
        return tf.ones(shape=(2, 2)) * 1.3

    x = tfe.define_private_variable(tf.ones(shape=(2, 2)))
    y = tfe.define_private_input('input-provider', provide_input)

    z = x - y

    with tfe.Session() as sess:
        # initialize variables
        sess.run(tfe.global_variables_initializer())
        # reveal result
        result = sess.run(z.reveal())
        close(result, np.array([[-0.3, -0.3], [-0.3, -0.3]]))
        print("test_sub_private_private succeeds")
Example #19
    def test_sub_private_private(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            return tf.ones(shape=(2, 2)) * 1.3

        x = tfe.define_private_variable(tf.ones(shape=(2, 2)))
        y = tfe.define_private_input("input-provider", provide_input)

        z = x - y

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z.reveal())
            expected = np.array([[-0.3, -0.3], [-0.3, -0.3]])
            np.testing.assert_allclose(result, expected, rtol=0.0, atol=0.01)
Example #20
def _nodef_to_private_pond(converter, x):
  """Map a NodeDef x to a PrivatePondTensor."""
  dtype = x.attr["dtype"].type
  warn_msg = "Unexpected dtype {} found at node {}"
  err_msg = "Unsupported dtype {} found at node {}"

  x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]

  if not x_shape:
    if dtype == tf.float32:
      nums = x.attr["value"].tensor.float_val
    elif dtype == tf.float64:
      nums = x.attr["value"].tensor.double_val  # float64 scalars are stored in double_val
    elif dtype == tf.int32:
      logging.warning(warn_msg.format(dtype, x.name))
      nums = x.attr["value"].tensor.int_val
    else:
      raise TypeError(err_msg.format(dtype, x.name))

    def inputter_fn():
      return tf.constant(np.array(nums).reshape(1, 1))

  else:
    if dtype == tf.float32:
      nums = array.array('f', x.attr["value"].tensor.tensor_content)
    elif dtype == tf.float64:
      nums = array.array('d', x.attr["value"].tensor.tensor_content)
    elif dtype == tf.int32:
      logging.warning(warn_msg.format(dtype, x.name))
      nums = array.array('i', x.attr["value"].tensor.tensor_content)
    else:
      raise TypeError(err_msg.format(dtype, x.name))

    def inputter_fn():
      return tf.constant(np.array(nums).reshape(x_shape))

  x_private = tfe.define_private_input(
      converter.model_provider, inputter_fn)

  return x_private
Example #21
    def test_clone_model(self):
        input_shape = (1, 3)
        input_data = np.random.normal(size=input_shape)

        model = tf.keras.models.Sequential()
        model.add(tf.keras.layers.Dense(2, batch_input_shape=input_shape))
        model.add(tf.keras.layers.Dense(3))
        expected = model.predict(input_data)

        with tfe.protocol.SecureNN():
            x = tfe.define_private_input(
                "inputter", lambda: tf.convert_to_tensor(input_data))

            tfe_model = tfe.keras.models.clone_model(model)

        with KE.get_session() as sess:
            y = tfe_model(x)
            actual = sess.run(y.reveal())

            np.testing.assert_allclose(actual, expected, rtol=1e-2, atol=1e-3)

        KE.clear_session()
Example #22
    def test_write_private(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            # normal TensorFlow operations can be run locally
            # as part of defining a private input, in this
            # case on the machine of the input provider
            return tf.ones(shape=(2, 2)) * 1.3

        # define inputs
        x = tfe.define_private_input('input-provider', provide_input)

        write_op = x.write("x.tfrecord")

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # write the secret shares to file
            sess.run(write_op)
            print("test_write_private succeeds")
Example #23
    def test_add_private_private(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        def provide_input():
            return tf.ones(shape=(2, 2)) * 1.3

        # define inputs
        x = tfe.define_private_variable(tf.ones(shape=(2, 2)))
        y = tfe.define_private_input('input-provider', provide_input)

        # define computation
        z = x + y

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z.reveal())
            # Should be [[2.3, 2.3], [2.3, 2.3]]
            expected = np.array([[2.3, 2.3], [2.3, 2.3]])
            np.testing.assert_allclose(result, expected, rtol=0.0, atol=0.01)
Example #24
training_set_size = 2000
test_set_size = 100
batch_size = 100
num_batches = (training_set_size // batch_size) * 10

model_owner = ModelOwner('model-owner')
data_owner_0 = DataOwner('data-owner-0', num_features, training_set_size,
                         test_set_size, batch_size // 2)
data_owner_1 = DataOwner('data-owner-1', num_features, training_set_size,
                         test_set_size, batch_size // 2)

tfe.set_protocol(
    tfe.protocol.Pond(tfe.get_config().get_player(data_owner_0.player_name),
                      tfe.get_config().get_player(data_owner_1.player_name)))

x_train_0, y_train_0 = tfe.define_private_input(
    data_owner_0.player_name, data_owner_0.provide_training_data)
x_train_1, y_train_1 = tfe.define_private_input(
    data_owner_1.player_name, data_owner_1.provide_training_data)

x_test_0, y_test_0 = tfe.define_private_input(
    data_owner_0.player_name, data_owner_0.provide_testing_data)
x_test_1, y_test_1 = tfe.define_private_input(
    data_owner_1.player_name, data_owner_1.provide_testing_data)

x_train = tfe.concat([x_train_0, x_train_1], axis=0)
y_train = tfe.concat([y_train_0, y_train_1], axis=0)

model = LogisticRegression(num_features)
reveal_weights_op = tfe.define_output(model_owner.player_name, model.weights,
                                      model_owner.receive_weights)
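
The excerpt stops after defining `reveal_weights_op`. Following the pattern of the other training examples in this collection (Examples #1 and #30), the remaining session block would presumably look roughly like this sketch:

# Sketch only: mirrors the session usage in the other training examples.
with tfe.Session() as sess:
    sess.run(tfe.global_variables_initializer(), tag='init')

    model.fit(sess, x_train, y_train, num_batches)
    model.evaluate(sess, x_test_0, y_test_0, data_owner_0)
    model.evaluate(sess, x_test_1, y_test_1, data_owner_1)

    sess.run(reveal_weights_op, tag='reveal')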
Example #25
"""Private prediction with a single clients"""
import tf_encrypted as tfe

from common import LogisticRegression, PredictionClient

num_features = 10

model = LogisticRegression(num_features)
prediction_client = PredictionClient('prediction-client', num_features)

x = tfe.define_private_input(prediction_client.player_name,
                             prediction_client.provide_input)

y = model.forward(x)

reveal_output = tfe.define_output(prediction_client.player_name, y,
                                  prediction_client.receive_output)

with tfe.Session() as sess:
    sess.run(tfe.global_variables_initializer(), tag='init')

    sess.run(reveal_output, tag='predict')
Example #26
import numpy as np
import random as ran

import tensorflow as tf
import tf_encrypted as tfe
from tensorflow.examples.tutorials.mnist import input_data


def provide_data(features):
    dataset = tf.data.Dataset.from_tensor_slices(features)
    dataset = dataset.repeat()
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    batch = iterator.get_next()
    batch = tf.reshape(batch, [10, 784])
    return batch


remote_config = tfe.RemoteConfig.load("config.json")
tfe.set_config(remote_config)

tfe.set_protocol(tfe.protocol.Pond())
players = remote_config.players
server0 = remote_config.server(players[0].name)

tfe.set_protocol(
    tfe.protocol.Pond(tfe.get_config().get_player("alice"),
                      tfe.get_config().get_player("bob")))

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
train_data = mnist.train.images[:100, :]
train_labels = mnist.train.labels[:100]

x_train_0 = tfe.define_private_input("alice", lambda: provide_data(train_data))
Example #27
            return op


if __name__ == "__main__":

    start = time.time()
    model_owner = ModelOwner(player_name="model-owner",
                             local_data_file="./data/train_half.tfrecord")

    prediction_client = PredictionClient(
        player_name="prediction-client",
        local_data_file="./data/test.tfrecord")

    # get model parameters as private tensors from model owner
    params = tfe.define_private_input(model_owner.player_name,
                                      model_owner.provide_input,
                                      masked=True)  # pylint: disable=E0632

    # we'll use the same parameters for each prediction so we cache them to
    # avoid re-training each time
    cache_updater, params = tfe.cache(params)

    # get prediction input from client
    x = tfe.define_private_input(prediction_client.player_name,
                                 prediction_client.provide_input,
                                 masked=True)  # pylint: disable=E0632

    # compute prediction
    w0, b0, w1, b1 = params
    layer0 = tfe.matmul(x, w0) + b0
    layer1 = tfe.sigmoid(layer0 *
Example #28
    def receive_output(self, likelihoods: tf.Tensor, y_true: tf.Tensor) -> tf.Tensor:
        with tf.name_scope('post-processing'):
            prediction = tf.argmax(likelihoods, axis=1)
            eq_values = tf.equal(prediction, tf.cast(y_true, tf.int64))
            acc = tf.reduce_mean(tf.cast(eq_values, tf.float32))
            op = tf.Print([], [y_true], summarize=self.BATCH_SIZE, message="EXPECT: ")
            op = tf.Print(op, [prediction], summarize=self.BATCH_SIZE, message="ACTUAL: ")
            op = tf.Print([op], [acc], summarize=self.BATCH_SIZE, message="ACCURACY: ")
            return op


model_trainer = ModelTrainer()
prediction_client = PredictionClient()

# get model parameters as private tensors from model owner
params = tfe.define_private_input('model-trainer', model_trainer.provide_input, masked=True)  # pylint: disable=E0632

# we'll use the same parameters for each prediction so we cache them to avoid re-training each time
params = tfe.cache(params)

# get prediction input from client
x, y = tfe.define_private_input('prediction-client', prediction_client.provide_input, masked=True)  # pylint: disable=E0632

# helpers
conv = lambda x, w, s: tfe.conv2d(x, w, s, 'VALID')
pool = lambda x: tfe.avgpool2d(x, (2, 2), (2, 2), 'VALID')

# compute prediction
Wconv1, bconv1, Wfc1, bfc1, Wfc2, bfc2 = params
bconv1 = tfe.reshape(bconv1, [-1, 1, 1])
layer1 = pool(tfe.relu(conv(x, Wconv1, ModelTrainer.STRIDE) + bconv1))
Example #29
    # config file was specified
    config_file = sys.argv[1]
    config = tfe.RemoteConfig.load(config_file)
    tfe.set_config(config)
    tfe.set_protocol(tfe.protocol.Pond())


def provide_input() -> tf.Tensor:
    # pick random tensor to be averaged
    return tf.random_normal(shape=(10, ))


if __name__ == '__main__':
    # get input from inputters as private values
    inputs = [
        tfe.define_private_input('inputter-0', provide_input),
        tfe.define_private_input('inputter-1', provide_input),
        tfe.define_private_input('inputter-2', provide_input),
        tfe.define_private_input('inputter-3', provide_input),
        tfe.define_private_input('inputter-4', provide_input),
    ]

    # sum all inputs and divide by count
    result = tfe.add_n(inputs) / len(inputs)

    def receive_output(average: tf.Tensor) -> tf.Operation:
        # simply print average
        return tf.print("Average:", average)

    # send result to receiver
    result_op = tfe.define_output('result-receiver', result, receive_output)
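
    # The snippet defines result_op but never runs it; the last step would
    # presumably be a session run like this sketch, following the other
    # examples' tagging convention.
    with tfe.Session() as sess:
        sess.run(result_op, tag='average')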
Example #30
import tf_encrypted as tfe

from common import DataOwner, ModelOwner, LogisticRegression

num_features = 10
training_set_size = 2000
test_set_size = 100
batch_size = 100
num_batches = (training_set_size // batch_size) * 10

model = LogisticRegression(num_features)
model_owner = ModelOwner('model-owner')
data_owner = DataOwner('data-owner', num_features, training_set_size,
                       test_set_size, batch_size)

x_train, y_train = tfe.define_private_input(data_owner.player_name,
                                            data_owner.provide_training_data)
x_test, y_test = tfe.define_private_input(data_owner.player_name,
                                          data_owner.provide_testing_data)

reveal_weights_op = tfe.define_output(model_owner.player_name, model.weights,
                                      model_owner.receive_weights)

with tfe.Session() as sess:
    sess.run([tfe.global_variables_initializer(), data_owner.initializer],
             tag='init')

    model.fit(sess, x_train, y_train, num_batches)
    model.evaluate(sess, x_test, y_test, data_owner)

    sess.run(reveal_weights_op, tag='reveal')