Example #1
def _slice(converter, node, inputs):
  x_in = converter.outputs[inputs[0]]
  begin = _nodef_to_numpy_array(converter.outputs[inputs[1]])
  size = _nodef_to_numpy_array(converter.outputs[inputs[2]])

  if isinstance(x_in, tf.NodeDef):
    input_out = _nodef_to_private_pond(converter, x_in)
  else:
    input_out = x_in

  # Slice is a special case of strided_slice. Slice takes size (the number of
  # elements we want to slice) as an input, whereas strided_slice takes end
  # (the index at which the slicing stops) as an input.
  # We can infer the end parameter with: end[i] = begin[i] + size[i].
  # If size is negative, the stepping goes towards smaller indices.
  # In that case we can infer the end parameter with:
  # end[i] = input_shape[i] - size[i] + 1
  end = np.zeros(len(begin))
  input_shape = x_in.shape.as_list()

  # If size is negative, take the input dimension into account.
  for i in range(len(end)):  # pylint: disable=consider-using-enumerate
    if size[i] < 0:
      end[i] = input_shape[i] - size[i] + 1
    else:
      end[i] = begin[i] + size[i]

  return tfe.strided_slice(input_out, begin, end)
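
For reference, the relationship the comment relies on can be checked directly with plain TensorFlow: for non-negative sizes, tf.slice(x, begin, size) selects the same region as tf.strided_slice(x, begin, begin + size). This is a minimal standalone sketch (eager TF 2.x, made-up toy tensor), not part of the converter code:

import numpy as np
import tensorflow as tf

x = tf.constant(np.arange(12).reshape(3, 4))
begin = np.array([1, 0])
size = np.array([2, 3])

a = tf.slice(x, begin, size)                  # rows 1..2, columns 0..2
b = tf.strided_slice(x, begin, begin + size)  # end[i] = begin[i] + size[i]
assert np.array_equal(a.numpy(), b.numpy())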
Example #2
def _strided_slice(converter, node: Any, inputs: List[str]) -> Any:
  x_in = converter.outputs[inputs[0]]

  if isinstance(x_in, tf.NodeDef):
    input_out = _nodef_to_private_pond(converter, x_in)
  else:
    input_out = x_in

  begin = converter.outputs[inputs[1]]
  end = converter.outputs[inputs[2]]
  strides = converter.outputs[inputs[3]]

  begin_mask = node.attr["begin_mask"].i
  end_mask = node.attr["end_mask"].i
  ellipsis_mask = node.attr["ellipsis_mask"].i
  new_axis_mask = node.attr["new_axis_mask"].i
  shrink_axis_mask = node.attr["shrink_axis_mask"].i

  begin = tf.constant(begin.attr["value"].tensor)
  end = tf.constant(end.attr["value"].tensor)
  strides = tf.constant(strides.attr["value"].tensor)

  return tfe.strided_slice(input_out, begin, end,
                           strides=strides,
                           begin_mask=begin_mask,
                           end_mask=end_mask,
                           ellipsis_mask=ellipsis_mask,
                           new_axis_mask=new_axis_mask,
                           shrink_axis_mask=shrink_axis_mask)
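
The five mask attributes read above are bitmasks over the slice dimensions: when bit i of begin_mask is set, begin[i] is ignored and the slice starts at the lowest index of that dimension (end_mask does the analogous thing for end[i]). A minimal standalone sketch in plain TensorFlow (eager TF 2.x, made-up values), not part of the converter:

import tensorflow as tf

x = tf.reshape(tf.range(12), [3, 4])
# begin_mask=0b10 tells strided_slice to ignore begin[1], so this is x[1:3, :].
y = tf.strided_slice(x, begin=[1, 2], end=[3, 4], strides=[1, 1], begin_mask=0b10)
assert y.shape == (2, 4)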
Example #3
def main(server):

    num_rows = 7000
    num_features = 32
    num_epoch = 5
    batch_size = 200
    num_batches = (num_rows // batch_size) * num_epoch

    # Who shall receive the output
    model_owner = ModelOwner('alice')

    data_schema0 = DataSchema([tf.float64] * 16, [0.0] * 16)
    data_schema1 = DataSchema([tf.int64] + [tf.float64] * 16, [0] + [0.0] * 16)
    data_owner_0 = DataOwner('alice',
                             'aliceTrainFile.csv',
                             data_schema0,
                             batch_size=batch_size)
    data_owner_1 = DataOwner('bob',
                             'bobTrainFileWithLabel.csv',
                             data_schema1,
                             batch_size=batch_size)

    tfe.set_protocol(
        tfe.protocol.Pond(
            tfe.get_config().get_player(data_owner_0.player_name),
            tfe.get_config().get_player(data_owner_1.player_name)))

    x_train_0 = tfe.define_private_input(data_owner_0.player_name,
                                         data_owner_0.provide_data)
    x_train_1 = tfe.define_private_input(data_owner_1.player_name,
                                         data_owner_1.provide_data)
    y_train = tfe.gather(x_train_1, 0, axis=1)
    y_train = tfe.reshape(y_train, [batch_size, 1])

    # Remove Bob's first column (which is the label)
    x_train_1 = tfe.strided_slice(x_train_1, [0, 1],
                                  [x_train_1.shape[0], x_train_1.shape[1]],
                                  [1, 1])

    x_train = tfe.concat([x_train_0, x_train_1], axis=1)

    model = LogisticRegression(num_features)
    reveal_weights_op = model_owner.receive_weights(model.weights)
    with tfe.Session() as sess:
        sess.run(tfe.global_variables_initializer(), tag='init')
        start_time = time.time()
        model.fit(sess, x_train, y_train, num_batches)
        end_time = time.time()
        # TODO(Morten)
        # Each evaluation adds nodes for a forward pass to the graph;
        # maybe there's some way to avoid this, even if it only works when the shapes match.
        model.evaluate(sess, x_train, y_train, data_owner_0)
        model.evaluate(sess, x_train, y_train, data_owner_1)

        print(sess.run(reveal_weights_op, tag='reveal'),
              ((end_time - start_time) * 1000))
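
For reference, the label split performed on Bob's input above (gather column 0 as y_train, then strided_slice away that column) corresponds to the following plain-NumPy sketch on a made-up batch; it is not part of the example itself:

import numpy as np

x1 = np.arange(12.0).reshape(4, 3)   # stand-in for Bob's batch: [label | features]
y = x1[:, 0].reshape(-1, 1)          # tfe.gather(x_train_1, 0, axis=1) + tfe.reshape
features = x1[:, 1:]                 # tfe.strided_slice(x_train_1, [0, 1], shape, [1, 1])
assert y.shape == (4, 1) and features.shape == (4, 2)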
Example #4
    tfe.set_protocol(
        tfe.protocol.Pond(
            tfe.get_config().get_player(data_owner_0.player_name),
            tfe.get_config().get_player(data_owner_1.player_name)))

    x_train_0 = tfe.define_private_input(data_owner_0.player_name,
                                         data_owner_0.provide_data)
    x_train_1 = tfe.define_private_input(data_owner_1.player_name,
                                         data_owner_1.provide_data)
    y_train = tfe.gather(x_train_1, 0, axis=1)
    y_train = tfe.reshape(y_train, [batch_size, 1])

    # Remove Bob's first column (which is the label)
    x_train_1 = tfe.strided_slice(x_train_1, [0, 1],
                                  [x_train_1.shape[0], x_train_1.shape[1]],
                                  [1, 1])
    x_train = tfe.concat([x_train_0, x_train_1], axis=1)

    model = LinearRegression(num_features)
    # model = LogisticRegression(num_features)
    fit_forward_op = model.fit_forward(x_train,
                                       y_train,
                                       learning_rate=case['l'])
    reveal_weights_op = model_owner.receive_weights(model.weights)

    # prepare test data
    test_x_data, test_y_data = load_test_data(f'{name}/tfe_features_test.csv',
                                              f'{name}/tfe_label_test.csv')

    with tfe.Session() as sess: