Example #1
    def call(self, inputs):

        if self.data_format != 'channels_first':
            inputs = tfe.transpose(inputs, perm=[0, 3, 1, 2])

        outputs = self._pool_function(inputs, self.pool_size, self.strides,
                                      self.padding)

        if self.data_format != 'channels_first':
            outputs = tfe.transpose(outputs, perm=[0, 2, 3, 1])

        return outputs
Example #2
    def forward(self, x: TFEVariable) -> TFEVariable:
        if not self.channels_first:
            x = tfe.transpose(x, perm=[0, 3, 1, 2])

        self.cached_input_shape = x.shape
        self.cache = x

        out = self.pool(x, self.pool_size, self.strides, self.padding)

        if not self.channels_first:
            out = tfe.transpose(out, perm=[0, 2, 3, 1])

        return out
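Examples #1 and #2 bracket the pooling call with a pair of transposes so that the pool function always receives NCHW data, regardless of the layer's data format. Below is a minimal NumPy sketch (illustrative shapes only, not part of either example) of what the two perms do to an NHWC tensor:

import numpy as np

x = np.zeros((8, 28, 28, 3))                 # NHWC: batch, height, width, channels
x_nchw = np.transpose(x, (0, 3, 1, 2))       # perm=[0, 3, 1, 2] -> (8, 3, 28, 28), NCHW
x_back = np.transpose(x_nchw, (0, 2, 3, 1))  # perm=[0, 2, 3, 1] restores NHWC
assert x_back.shape == x.shape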
Example #3
    def test_transpose(self):
        tf.reset_default_graph()

        prot = ABY3()
        tfe.set_protocol(prot)

        x = tfe.define_private_variable(tf.constant([[1, 2, 3], [4, 5, 6]]))
        y = tfe.define_constant(np.array([[1, 2, 3], [4, 5, 6]]))

        z1 = x.transpose()
        z2 = tfe.transpose(y)

        with tfe.Session() as sess:
            # initialize variables
            sess.run(tfe.global_variables_initializer())
            # reveal result
            result = sess.run(z1.reveal())
            np.testing.assert_allclose(
                result, np.array([[1, 4], [2, 5], [3, 6]]), rtol=0.0, atol=0.01
            )

            result = sess.run(z2)
            np.testing.assert_allclose(
                result, np.array([[1, 4], [2, 5], [3, 6]]), rtol=0.0, atol=0.01
            )
Example #4
    def forward(self, x):
        """Compute the forward convolution."""
        self.cached_input_shape = x.shape
        self.cache = x

        if not self.channels_first:
            x = tfe.transpose(x, perm=[0, 3, 1, 2])

        out = tfe.conv2d(x, self.weights, self.strides, self.padding)
        if self.bias is not None:
            out = out + self.bias

        if not self.channels_first:
            out = tfe.transpose(out, perm=[0, 2, 3, 1])

        return out
Example #5
    def call(self, inputs):

        if self.data_format != 'channels_first':
            inputs = tfe.transpose(inputs, perm=[0, 3, 1, 2])

        outputs = tfe.conv2d(inputs, self.kernel, self.strides[0],
                             self.padding)

        if self.use_bias:
            outputs = outputs + self.bias

        if self.data_format != 'channels_first':
            outputs = tfe.transpose(outputs, perm=[0, 2, 3, 1])

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
Example #6
    def backward(self, x, dy, learning_rate=0.01):
        batch_size = x.shape.as_list()[0]
        with tf.name_scope("backward"):
            dw = tfe.matmul(tfe.transpose(x), dy) / batch_size
            db = tfe.reduce_sum(dy, axis=0) / batch_size
            assign_ops = [
                tfe.assign(self.w, self.w - dw * learning_rate),
                tfe.assign(self.b, self.b - db * learning_rate),
            ]
            return assign_ops
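Example #6 uses tfe.transpose inside a hand-written backward pass for a dense layer: for out = x @ w + b, the loss gradient with respect to w is x.T @ dy averaged over the batch. A plain NumPy sketch of the same arithmetic (illustrative only, not tf-encrypted code):

import numpy as np

batch_size, n_in, n_out = 4, 3, 2
x = np.random.randn(batch_size, n_in)
dy = np.random.randn(batch_size, n_out)   # upstream gradient

dw = np.matmul(x.T, dy) / batch_size      # shape (n_in, n_out), matches w
db = dy.sum(axis=0) / batch_size          # shape (n_out,), matches b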
Example #7
    def rearrange_kernel(self, kernel):
        """Rearrange the kernel to match normal convolution kernels.

        Arguments:
          kernel: kernel to be rearranged
        """
        mask = self.get_mask(self.input_dim)

        if isinstance(kernel, tf.Tensor):
            mask = tf.constant(mask.tolist(),
                               dtype=tf.float32,
                               shape=(self.kernel_size[0], self.kernel_size[1],
                                      self.input_dim * self.depth_multiplier,
                                      self.input_dim))

            if self.depth_multiplier > 1:
                # rearrange kernel
                kernel = tf.transpose(kernel, [0, 1, 3, 2])
                kernel = tf.reshape(
                    kernel,
                    shape=self.kernel_size +
                    (self.input_dim * self.depth_multiplier, 1))

            kernel = tf.multiply(kernel, mask)

        elif isinstance(kernel, np.ndarray):
            if self.depth_multiplier > 1:
                # rearrange kernel
                kernel = np.transpose(kernel, [0, 1, 3, 2])
                kernel = np.reshape(
                    kernel,
                    newshape=self.kernel_size +
                    (self.input_dim * self.depth_multiplier, 1))

            kernel = np.multiply(kernel, mask)

        elif isinstance(kernel, PondPrivateTensor):
            mask = tfe.define_public_variable(mask)
            if self.depth_multiplier > 1:
                # rearrange kernel
                kernel = tfe.transpose(kernel, [0, 1, 3, 2])
                kernel = tfe.reshape(
                    kernel,
                    shape=self.kernel_size +
                    (self.input_dim * self.depth_multiplier, 1))

            kernel = tfe.mul(kernel, mask)

        return kernel
Example #8
def test_simple_lr_model():
    tf.reset_default_graph()

    import time
    start = time.time()
    prot = ABY3()
    tfe.set_protocol(prot)

    # define inputs
    x_raw = tf.random.uniform(minval=-0.5,
                              maxval=0.5,
                              shape=[99, 10],
                              seed=1000)
    x = tfe.define_private_variable(x_raw, name="x")
    y_raw = tf.cast(tf.reduce_mean(x_raw, axis=1, keepdims=True) > 0,
                    dtype=tf.float32)
    y = tfe.define_private_variable(y_raw, name="y")
    w = tfe.define_private_variable(tf.random_uniform([10, 1],
                                                      -0.01,
                                                      0.01,
                                                      seed=100),
                                    name="w")
    b = tfe.define_private_variable(tf.zeros([1]), name="b")
    learning_rate = 0.01

    with tf.name_scope("forward"):
        out = tfe.matmul(x, w) + b
        y_hat = tfe.sigmoid(out)

    with tf.name_scope("loss-grad"):
        dy = y_hat - y
    batch_size = x.shape.as_list()[0]
    with tf.name_scope("backward"):
        dw = tfe.matmul(tfe.transpose(x), dy) / batch_size
        db = tfe.reduce_sum(dy, axis=0) / batch_size
        upd1 = dw * learning_rate
        upd2 = db * learning_rate
        assign_ops = [tfe.assign(w, w - upd1), tfe.assign(b, b - upd2)]

    with tfe.Session() as sess:
        # initialize variables
        sess.run(tfe.global_variables_initializer())
        for i in range(1):
            sess.run(assign_ops)

        print(sess.run(w.reveal()))
    end = time.time()
    print("Elapsed time: {} seconds".format(end - start))
Example #9
def _transpose(converter, node: Any, inputs: List[str]) -> Any:
  x_in = converter.outputs[inputs[0]]
  perm = converter.outputs[inputs[1]]

  tensor = perm.attr["value"].tensor
  shape = [i.size for i in tensor.tensor_shape.dim]

  dtype = perm.attr["dtype"].type
  if dtype == tf.int32:
    nums = array.array('i', tensor.tensor_content)
  elif dtype == tf.int64:
    nums = array.array('l', tensor.tensor_content)
  else:
    raise TypeError("Unsupported dtype for transpose perm")

  return tfe.transpose(x_in, np.array(nums).reshape(shape))
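The converter in Example #9 reads the permutation from the graph constant's raw tensor_content bytes. A small illustration of that decoding step (plain Python with hypothetical values, not converter code):

import array
import numpy as np

raw = np.array([0, 2, 3, 1], dtype=np.int32).tobytes()  # stand-in for tensor_content
nums = array.array('i', raw)                             # decode 4-byte ints
print(np.array(nums).reshape([4]))                       # [0 2 3 1]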
Example #10
    def call(self, inputs):
        input_shape = inputs.shape.as_list()
        rank = len(input_shape)

        if self.data_format == "channels_first" and rank > 1:
            permutation = [0]
            permutation.extend(i for i in range(2, rank))
            permutation.append(1)
            inputs = tfe.transpose(inputs, perm=permutation)

        if rank == 1:
            flatten_shape = [input_shape[0], 1]
        else:
            flatten_shape = [input_shape[0], -1]

        outputs = tfe.reshape(inputs, flatten_shape)

        return outputs
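For a rank-4 channels-first input, the permutation built in Example #10 moves the channel axis to the end before flattening. A one-line illustration (plain Python) of the list being constructed:

rank = 4
permutation = [0] + list(range(2, rank)) + [1]
assert permutation == [0, 2, 3, 1]   # NCHW -> NHWC before the reshape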
Example #11
xp, yp = tfe.define_private_input('input-provider', lambda: gen_training_input(training_set_size, nb_feats, batch_size))
xp_test, yp_test = tfe.define_private_input('input-provider', lambda: gen_test_input(training_set_size, nb_feats, batch_size))

W = tfe.define_private_variable(tf.random_uniform([nb_feats, 1], -0.01, 0.01))
b = tfe.define_private_variable(tf.zeros([1]))

# Training model
out = tfe.matmul(xp, W) + b
pred = tfe.sigmoid(out)
# Due to missing log function approximation, we need to compute the cost in numpy
# cost = -tfe.sum(y * tfe.log(pred) + (1 - y) * tfe.log(1 - pred)) * (1/train_batch_size)

# Backprop
dc_dout = pred - yp
dW = tfe.matmul(tfe.transpose(xp), dc_dout) * (1 / batch_size)
db = tfe.reduce_sum(1. * dc_dout, axis=0) * (1 / batch_size)
ops = [
    tfe.assign(W, W - dW * learning_rate),
    tfe.assign(b, b - db * learning_rate)
]

# Testing model
pred_test = tfe.sigmoid(tfe.matmul(xp_test, W) + b)


def print_accuracy(pred_test_tf, y_test_tf: tf.Tensor) -> tf.Operation:
    correct_prediction = tf.equal(tf.round(pred_test_tf), y_test_tf)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return tf.print("Accuracy", accuracy)
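Example #11 only builds the training and testing graphs. A hedged sketch of how they would typically be driven, modeled on the session usage in Example #8 (epochs is a hypothetical parameter, and accuracy is checked in plaintext after revealing the test tensors rather than via print_accuracy):

epochs = 10

with tfe.Session() as sess:
    sess.run(tfe.global_variables_initializer())

    for _ in range(epochs):
        sess.run(ops)                        # one encrypted gradient step on W and b

    preds = sess.run(pred_test.reveal())     # decrypt test predictions
    labels = sess.run(yp_test.reveal())
    accuracy = np.mean(np.round(preds) == labels)
    print("Accuracy", accuracy)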