Example 1
def _flatten(converter, node, inputs):
  x_in = converter.outputs[inputs[0]]

  # Multiply out all non-batch dimensions, then collapse them into one.
  shape = x_in.shape.as_list()
  non_batch = 1
  for dim in shape[1:]:
    non_batch *= dim

  return tfe.reshape(x_in, [-1, non_batch])
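
The same flatten pattern can be written directly against tfe.reshape. A minimal sketch, assuming tf_encrypted's define_private_variable and a made-up (2, 3, 4, 5) activation:

import numpy as np
import tf_encrypted as tfe

# hypothetical private 4-D activation with batch size 2
x = tfe.define_private_variable(np.ones((2, 3, 4, 5)))
# collapse the non-batch dimensions, exactly as _flatten does above
flat = tfe.reshape(x, [-1, 3 * 4 * 5])  # resulting shape: (2, 60)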
Example 2
def main(server):

    num_rows = 7000
    num_features = 32
    num_epoch = 5
    batch_size = 200
    num_batches = (num_rows // batch_size) * num_epoch

    # who will receive the trained model weights
    model_owner = ModelOwner('alice')

    data_schema0 = DataSchema([tf.float64] * 16, [0.0] * 16)
    data_schema1 = DataSchema([tf.int64] + [tf.float64] * 16, [0] + [0.0] * 16)
    data_owner_0 = DataOwner('alice',
                             'aliceTrainFile.csv',
                             data_schema0,
                             batch_size=batch_size)
    data_owner_1 = DataOwner('bob',
                             'bobTrainFileWithLabel.csv',
                             data_schema1,
                             batch_size=batch_size)

    tfe.set_protocol(
        tfe.protocol.Pond(
            tfe.get_config().get_player(data_owner_0.player_name),
            tfe.get_config().get_player(data_owner_1.player_name)))

    x_train_0 = tfe.define_private_input(data_owner_0.player_name,
                                         data_owner_0.provide_data)
    x_train_1 = tfe.define_private_input(data_owner_1.player_name,
                                         data_owner_1.provide_data)
    y_train = tfe.gather(x_train_1, 0, axis=1)
    y_train = tfe.reshape(y_train, [batch_size, 1])

    # Remove bob's first column (which is the label)
    x_train_1 = tfe.strided_slice(x_train_1, [0, 1],
                                  [x_train_1.shape[0], x_train_1.shape[1]],
                                  [1, 1])

    x_train = tfe.concat([x_train_0, x_train_1], axis=1)

    model = LogisticRegression(num_features)
    reveal_weights_op = model_owner.receive_weights(model.weights)
    with tfe.Session() as sess:
        sess.run(tfe.global_variables_initializer(), tag='init')
        start_time = time.time()
        model.fit(sess, x_train, y_train, num_batches)
        end_time = time.time()
        # TODO(Morten)
        # each evaluation results in nodes for a forward pass being added to the graph;
        # maybe there's some way to avoid this, even if it means only if the shapes match
        model.evaluate(sess, x_train, y_train, data_owner_0)
        model.evaluate(sess, x_train, y_train, data_owner_1)

        print(sess.run(reveal_weights_op, tag='reveal'),
              (end_time - start_time) * 1000)  # revealed weights and training time in ms
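
The gather/strided_slice pair above is just column indexing: gather pulls out the label column, and strided_slice with begin [0, 1], end equal to the full shape, and unit strides drops it. A NumPy sketch on made-up data:

import numpy as np

x = np.arange(12).reshape(4, 3)   # 4 rows; column 0 holds the label
labels = x[:, 0].reshape(-1, 1)   # what tfe.gather + tfe.reshape compute
features = x[:, 1:]               # what tfe.strided_slice([0, 1], x.shape, [1, 1]) computes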
Example 3
import array
from typing import Any, List

import tensorflow as tf
import tf_encrypted as tfe


def _reshape(converter, node: Any, inputs: List[str]) -> Any:
  x_in = converter.outputs[inputs[0]]
  shape = converter.outputs[inputs[1]]

  # Decode the target shape from the constant node's raw tensor bytes.
  tensor = shape.attr["value"].tensor
  dtype = shape.attr["dtype"].type
  if dtype == tf.int32:
    nums = array.array('i', tensor.tensor_content)
  elif dtype == tf.int64:
    # 'q' is always 8 bytes per item; 'l' is platform-dependent
    nums = array.array('q', tensor.tensor_content)
  else:
    raise TypeError("Unsupported dtype for reshape shape")

  return tfe.reshape(x_in, list(nums))
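
A standard-library check of why the typecode matters when decoding tensor_content (values are made up):

import array

raw = array.array('q', [2, -1]).tobytes()  # int64 values as raw bytes
print(array.array('q', raw).tolist())      # [2, -1]; 'q' is guaranteed 8 bytes per item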
Example 4
    def rearrange_kernel(self, kernel):
        """Rearrange kernel to match normal convolution kernels.

        Arguments:
          kernel: kernel to be rearranged
        """
        mask = self.get_mask(self.input_dim)

        if isinstance(kernel, tf.Tensor):
            mask = tf.constant(mask.tolist(),
                               dtype=tf.float32,
                               shape=(self.kernel_size[0], self.kernel_size[1],
                                      self.input_dim * self.depth_multiplier,
                                      self.input_dim))

            if self.depth_multiplier > 1:
                # rearrange kernel
                kernel = tf.transpose(kernel, [0, 1, 3, 2])
                kernel = tf.reshape(
                    kernel,
                    shape=self.kernel_size +
                    (self.input_dim * self.depth_multiplier, 1))

            kernel = tf.multiply(kernel, mask)

        elif isinstance(kernel, np.ndarray):
            if self.depth_multiplier > 1:
                # rearrange kernel
                kernel = np.transpose(kernel, [0, 1, 3, 2])
                kernel = np.reshape(
                    kernel,
                    newshape=self.kernel_size +
                    (self.input_dim * self.depth_multiplier, 1))

            kernel = np.multiply(kernel, mask)

        elif isinstance(kernel, PondPrivateTensor):
            mask = tfe.define_public_variable(mask)
            if self.depth_multiplier > 1:
                # rearrange kernel
                kernel = tfe.transpose(kernel, [0, 1, 3, 2])
                kernel = tfe.reshape(
                    kernel,
                    shape=self.kernel_size +
                    (self.input_dim * self.depth_multiplier, 1))

            kernel = tfe.mul(kernel, mask)

        return kernel
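
The transpose-then-reshape step above moves the depth-multiplier axis in front of the channel axis before flattening the two into one. A NumPy sketch with made-up sizes (3x3 kernel, 2 input channels, depth_multiplier of 2):

import numpy as np

k = np.zeros((3, 3, 2, 2))           # (kh, kw, input_dim, depth_multiplier)
k = np.transpose(k, [0, 1, 3, 2])    # (kh, kw, depth_multiplier, input_dim)
k = np.reshape(k, (3, 3, 2 * 2, 1))  # (kh, kw, input_dim * depth_multiplier, 1)
print(k.shape)                       # (3, 3, 4, 1)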
Example 5
    def call(self, inputs):
        input_shape = inputs.shape.as_list()
        rank = len(input_shape)

        if self.data_format == "channels_first" and rank > 1:
            permutation = [0]
            permutation.extend(i for i in range(2, rank))
            permutation.append(1)
            inputs = tfe.transpose(inputs, perm=permutation)

        if rank == 1:
            flatten_shape = [input_shape[0], 1]
        else:
            flatten_shape = [input_shape[0], -1]

        outputs = tfe.reshape(inputs, flatten_shape)

        return outputs
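
For a rank-4 input the permutation built above is [0, 2, 3, 1], i.e. NCHW to NHWC. A quick check:

rank = 4
permutation = [0] + list(range(2, rank)) + [1]
print(permutation)  # [0, 2, 3, 1]: batch, spatial dimensions, then channels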
Example 6
# get model parameters as private tensors from model owner
params = tfe.define_private_input('model-trainer', model_trainer.provide_input, masked=True)  # pylint: disable=E0632

# we'll use the same parameters for each prediction so we cache them to avoid re-training each time
params = tfe.cache(params)

# get prediction input from client
x, y = tfe.define_private_input('prediction-client', prediction_client.provide_input, masked=True)  # pylint: disable=E0632

# helpers
conv = lambda x, w, s: tfe.conv2d(x, w, s, 'VALID')
pool = lambda x: tfe.avgpool2d(x, (2, 2), (2, 2), 'VALID')

# compute prediction
Wconv1, bconv1, Wfc1, bfc1, Wfc2, bfc2 = params
bconv1 = tfe.reshape(bconv1, [-1, 1, 1])  # (C,) -> (C, 1, 1) so the bias broadcasts over H and W
layer1 = pool(tfe.relu(conv(x, Wconv1, ModelTrainer.STRIDE) + bconv1))
layer1 = tfe.reshape(layer1, [-1, ModelTrainer.HIDDEN_FC1])
layer2 = tfe.matmul(layer1, Wfc1) + bfc1
logits = tfe.matmul(layer2, Wfc2) + bfc2

# send prediction output back to client
prediction_op = tfe.define_output('prediction-client', [logits, y], prediction_client.receive_output)


with tfe.Session() as sess:
    print("Init")
    sess.run(tf.global_variables_initializer(), tag='init')

    print("Training")
    sess.run(tfe.global_caches_updater(), tag='training')
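
The bias reshape above relies on broadcasting against the conv output, assuming the NCHW layout that the (C, 1, 1) bias shape implies. A NumPy sketch with made-up shapes:

import numpy as np

out = np.zeros((1, 16, 24, 24))     # NCHW conv output
b = np.zeros(16).reshape(-1, 1, 1)  # (16,) -> (16, 1, 1)
print((out + b).shape)              # (1, 16, 24, 24): bias broadcast over H and W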
Example 7
    conv2_tfe.initialize(initial_weights=wconv2)

    avg_pool1 = tfe.layers.AveragePooling2D(input_shape=[-1, 24, 24, 20],
                                            pool_size=(2, 2),
                                            strides=(2, 2),
                                            padding='VALID',
                                            channels_first=False)

    avg_pool2 = tfe.layers.AveragePooling2D(input_shape=[-1, 8, 8, 50],
                                            pool_size=(2, 2),
                                            strides=(2, 2),
                                            padding='VALID',
                                            channels_first=False)

    x = tfe.reshape(x, [-1, 28, 28, 1])
    layer1 = avg_pool1.forward(tfe.relu(conv1_tfe.forward(x) + bconv1))
    layer2 = avg_pool2.forward(tfe.relu(conv2_tfe.forward(layer1) + bconv2))
    layer2 = tfe.reshape(layer2, [-1, ModelTrainer.HIDDEN_FC1])  # flatten pooled features for the dense layers
    layer3 = tfe.relu(tfe.matmul(layer2, wfc1) + bfc1)
    logits = tfe.matmul(layer3, wfc2) + bfc2

    # send prediction output back to client
    prediction_op = tfe.define_output('prediction-client', [logits, y],
                                      prediction_client.receive_output)

    with tfe.Session() as sess:
        print("Init")
        sess.run(tf.global_variables_initializer(), tag='init')

        print("Training")
Example 8
    def forward(self, x):
        y = tfe.reshape(x, self.output_shape)
        self.layer_output = y
        return y
Example 9
    data_owner_1 = DataOwner('bob',
                             f'{name}/tfe_label_train.csv',
                             data_schema1,
                             batch_size=batch_size)

    tfe.set_protocol(
        tfe.protocol.Pond(
            tfe.get_config().get_player(data_owner_0.player_name),
            tfe.get_config().get_player(data_owner_1.player_name)))

    x_train_0 = tfe.define_private_input(data_owner_0.player_name,
                                         data_owner_0.provide_data)
    x_train_1 = tfe.define_private_input(data_owner_1.player_name,
                                         data_owner_1.provide_data)
    y_train = tfe.gather(x_train_1, 0, axis=1)
    y_train = tfe.reshape(y_train, [batch_size, 1])
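    # y_train now has shape (batch_size, 1); column 0 of bob's data holds the labels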

    # Remove bob's first column (which is the label)
    x_train_1 = tfe.strided_slice(x_train_1, [0, 1],
                                  [x_train_1.shape[0], x_train_1.shape[1]],
                                  [1, 1])
    x_train = tfe.concat([x_train_0, x_train_1], axis=1)

    model = LinearRegression(num_features)
    # model = LogisticRegression(num_features)
    fit_forward_op = model.fit_forward(x_train,
                                       y_train,
                                       learning_rate=case['l'])
    reveal_weights_op = model_owner.receive_weights(model.weights)

    # prepare test data