# Example 1
def _maxpool2d_public(prot: Pond, x: PondPublicTensor,
                      pool_size: Tuple[int, int], strides: Tuple[int, int],
                      padding: str) -> PondPublicTensor:
    """Perform 2-D max pooling on a public tensor.

    The input is rearranged into columns (im2col), the maximum is taken
    over each pooling window, and the pooled values are reshaped and
    transposed back into the original layout.
    """
    with tf.name_scope('maxpool2d'):
        cols_on_0, cols_on_1, out_shape = _im2col(prot, x, pool_size,
                                                  strides, padding)
        cols = PondPublicTensor(prot, cols_on_0, cols_on_1, x.is_scaled)
        pooled = cols.reduce_max(axis=0)
        return pooled.reshape(out_shape).transpose([2, 3, 0, 1])
# Example 2
    def test_private(self):
        """Check _private_compare on private inputs against a numpy reference."""

        x = np.array([21, 21, 21, 21, 21, 21, 21, 21],
                     dtype=np.int32).reshape(2, 2, 2)
        r = np.array([36, 20, 21, 22, 36, 20, 21, 22],
                     dtype=np.int32).reshape(2, 2, 2)
        beta = np.array([0, 0, 0, 0, 1, 1, 1, 1],
                        dtype=np.int32).reshape(2, 2, 2)

        # reference result: (x > r) XOR beta, computed in the clear
        expected = np.bitwise_xor(x > r, beta.astype(bool)).astype(np.int32)

        prot = tfe.protocol.SecureNN()
        bit_dtype = prot.prime_factory
        val_dtype = prot.tensor_factory

        # secret-share the bit decomposition of x
        x_native = tf.convert_to_tensor(x, dtype=val_dtype.native_type)
        x_bits_raw = val_dtype.tensor(x_native).to_bits(bit_dtype)
        x_bits = PondPrivateTensor(prot, *prot._share(x_bits_raw), False)

        # r and beta are public: both servers hold the same plaintext copy
        r_native = tf.convert_to_tensor(r, dtype=val_dtype.native_type)
        r_pub = PondPublicTensor(prot,
                                 val_dtype.tensor(r_native),
                                 val_dtype.tensor(r_native),
                                 False)

        beta_native = tf.convert_to_tensor(beta, dtype=bit_dtype.native_type)
        beta_pub = PondPublicTensor(prot,
                                    bit_dtype.tensor(beta_native),
                                    bit_dtype.tensor(beta_native),
                                    False)

        res = _private_compare(prot, x_bits=x_bits, r=r_pub, beta=beta_pub)

        with tfe.Session() as sess:
            actual = sess.run(res.reveal().value_on_0.to_native())
            np.testing.assert_array_equal(actual, expected)
# Example 3
    def _core_test(self, tensor_factory):
        """Run _private_compare under the given tensor factory and verify it."""

        prot = tfe.protocol.SecureNN(tensor_factory=tensor_factory)

        bit_dtype = prot.prime_factory
        val_dtype = prot.tensor_factory

        x = np.array([21, 21, 21, 21, 21, 21, 21, 21],
                     dtype=np.int32).reshape((2, 2, 2))
        r = np.array([36, 20, 21, 22, 36, 20, 21, 22],
                     dtype=np.int32).reshape((2, 2, 2))
        beta = np.array([0, 0, 0, 0, 1, 1, 1, 1],
                        dtype=np.int32).reshape((2, 2, 2))

        # reference result computed in the clear
        expected = np.bitwise_xor(x > r, beta.astype(bool)).astype(np.int32)

        # secret-share the bit decomposition of x
        x_backed = val_dtype.tensor(
            tf.convert_to_tensor(x, dtype=val_dtype.native_type))
        shares = prot._share(x_backed.bits(bit_dtype))  # pylint: disable=protected-access

        # r and beta are public: each server gets the same plaintext copy
        r_backed = val_dtype.tensor(
            tf.convert_to_tensor(r, dtype=val_dtype.native_type))
        beta_backed = bit_dtype.tensor(
            tf.convert_to_tensor(beta, dtype=bit_dtype.native_type))

        res = _private_compare(
            prot,
            x_bits=PondPrivateTensor(prot, *shares, False),
            r=PondPublicTensor(prot, r_backed, r_backed, False),
            beta=PondPublicTensor(prot, beta_backed, beta_backed, False),
        )

        with tfe.Session() as sess:
            actual = sess.run(res.reveal().value_on_0.to_native())
            np.testing.assert_array_equal(actual, expected)
# Example 4
def _equal_zero_public(prot, x: PondPublicTensor, dtype=None):
    """Element-wise zero test on a public tensor.

    Each server evaluates the check locally on its copy of the value;
    the results are wrapped back into a non-scaled public tensor.
    """
    with tf.name_scope('equal_zero'):
        value_0, value_1 = x.unwrapped

        with tf.device(prot.server_0.device_name):
            result_0 = value_0.equal_zero(dtype)

        with tf.device(prot.server_1.device_name):
            result_1 = value_1.equal_zero(dtype)

        return PondPublicTensor(prot, result_0, result_1, False)
# Example 5
def _cast_backing_public(prot, x: PondPublicTensor, backing_dtype):
    """Cast the backing dtype of a public tensor on both servers.

    The scaling flag of the input is carried over unchanged.
    """
    # See refactoring comment below under private version.
    value_0, value_1 = x.unwrapped

    with tf.name_scope("cast_backing"):

        with tf.device(prot.server_0.device_name):
            cast_0 = value_0.cast(backing_dtype)

        with tf.device(prot.server_1.device_name):
            cast_1 = value_1.cast(backing_dtype)

        return PondPublicTensor(prot, cast_0, cast_1, x.is_scaled)
# Example 6
def _bits_public(prot, x: PondPublicTensor, factory=None) -> PondPublicTensor:
    """Expand a public tensor into its binary (bit-decomposed) form.

    When no factory is given, the protocol's default tensor factory is
    used for the resulting bits.
    """
    factory = factory or prot.tensor_factory

    with tf.name_scope('bits'):
        value_0, value_1 = x.unwrapped

        with tf.device(prot.server_0.device_name):
            decomposed_0 = value_0.bits(factory)

        with tf.device(prot.server_1.device_name):
            decomposed_1 = value_1.bits(factory)

        return PondPublicTensor(prot, decomposed_0, decomposed_1, False)
# Example 7
    def test_public_compare(self):
        """Equality of a public tensor against a plain scalar constant."""

        expected = np.array([1, 0, 1, 0])

        with tfe.protocol.Pond() as prot:

            raw = prot.tensor_factory.constant(np.array([100, 200, 100, 300]))
            x = PondPublicTensor(prot,
                                 value_on_0=raw,
                                 value_on_1=raw,
                                 is_scaled=False)

            res = prot.equal(x, 100)

            with tfe.Session() as sess:
                sess.run(tf.global_variables_initializer())
                answer = sess.run(res)

            assert np.array_equal(answer, expected)
# Example 8
def _private_compare(prot, x_bits: PondPrivateTensor, r: PondPublicTensor,
                     beta: PondPublicTensor):
    """Logic for private comparison.

    Securely evaluates (x > r) XOR beta, where x is available only via
    its secret-shared bit decomposition `x_bits`, while `r` and `beta`
    are public.  The outcome is re-shared before returning so no single
    server learns it.

    Args:
      prot: protocol instance providing servers and share primitives.
      x_bits: private tensor of the bits of x along the last axis,
        backed by the protocol's prime factory.
      r: public comparison values; shape equals x_bits minus the bit axis.
      beta: public 0/1 blinding tensor, same shape as r.

    Returns:
      A private (shared, non-scaled) 0/1 tensor of shape r.shape.
    """
    # TODO[Morten] no need to check this (should be free)
    assert x_bits.backing_dtype == prot.prime_factory
    assert r.backing_dtype.native_type == prot.tensor_factory.native_type

    out_shape = r.shape
    out_dtype = r.backing_dtype
    prime_dtype = x_bits.backing_dtype
    bit_length = x_bits.shape[-1]

    # NOTE(review): several of the assertions below are tautological given
    # the derivations just above (e.g. r.shape == out_shape); presumably
    # kept as executable documentation of the expected contract.
    assert r.shape == out_shape
    assert r.backing_dtype.native_type == out_dtype.native_type
    assert not r.is_scaled

    assert x_bits.shape[:-1] == out_shape
    assert x_bits.backing_dtype == prime_dtype
    assert not x_bits.is_scaled

    assert beta.shape == out_shape
    assert beta.backing_dtype == prime_dtype
    assert not beta.is_scaled

    with tf.name_scope('private_compare'):

        with tf.name_scope('bit_comparisons'):

            # use either r or t = r + 1 according to beta
            s = prot.select(prot.cast_backing(beta, out_dtype), r, r + 1)
            s_bits = prot.bits(s, factory=prime_dtype)
            assert s_bits.shape[-1] == bit_length

            # compute w_sum
            w_bits = prot.bitwise_xor(x_bits, s_bits)
            # exclusive reversed cumsum: number of differing bits strictly
            # above each bit position
            w_sum = prot.cumsum(w_bits, axis=-1, reverse=True, exclusive=True)
            assert w_sum.backing_dtype == prime_dtype

            # compute c, ignoring edge cases at first
            sign = prot.select(beta, 1, -1)
            sign = prot.expand_dims(sign, axis=-1)
            c_except_edge_case = (s_bits - x_bits) * sign + 1 + w_sum

            assert c_except_edge_case.backing_dtype == prime_dtype

        with tf.name_scope('edge_cases'):

            # adjust for edge cases, i.e. where beta is 1 and s is zero (meaning r was -1)

            # identify edge cases
            edge_cases = prot.bitwise_and(beta,
                                          prot.equal_zero(s, prime_dtype))
            edge_cases = prot.expand_dims(edge_cases, axis=-1)

            # tensor for edge cases: one zero and the rest ones
            c_edge_vals = [0] + [1] * (bit_length - 1)
            c_const = tf.constant(c_edge_vals,
                                  dtype=prime_dtype.native_type,
                                  shape=(1, bit_length))
            c_edge_case_raw = prime_dtype.tensor(c_const)
            c_edge_case = prot._share_and_wrap(c_edge_case_raw, False)

            c = prot.select(edge_cases, c_except_edge_case, c_edge_case)
            assert c.backing_dtype == prime_dtype

        with tf.name_scope('zero_search'):

            # generate multiplicative mask to hide non-zero values
            with tf.device(prot.server_0.device_name):
                # minval=1 keeps the mask non-zero so it is invertible in
                # the prime field
                mask_raw = prime_dtype.sample_uniform(c.shape, minval=1)
                mask = PondPublicTensor(prot, mask_raw, mask_raw, False)

            # mask non-zero values; this is safe when we're in a prime dtype (since it's a field)
            c_masked = c * mask
            assert c_masked.backing_dtype == prime_dtype

            # TODO[Morten] permute

            # reconstruct masked values on server 2 to find entries with zeros
            with tf.device(prot.server_2.device_name):
                d = prot._reconstruct(*c_masked.unwrapped)
                # find all zero entries
                zeros = d.equal_zero(out_dtype)
                # for each bit sequence, determine whether it has one or no zero in it
                rows_with_zeros = zeros.reduce_sum(axis=-1, keepdims=False)
                # reshare result
                result = prot._share_and_wrap(rows_with_zeros, False)

        assert result.backing_dtype.native_type == out_dtype.native_type
        return result
# Example 9
def _lsb_private(prot, x: PondPrivateTensor):
    """Find the least significant bit of a private tensor.

    Blinds x with a random mask r generated on server 2, reveals
    c = x + r, and then recovers lsb(x) from lsb(c), lsb(r), and a
    private comparison involving the bits of r — without ever revealing
    x itself.
    """

    # TODO[Morten] in the refactor these could be type parameters
    odd_dtype = x.backing_dtype
    out_dtype = prot.tensor_factory
    prime_dtype = prot.prime_factory

    # the backing modulus must be odd for this construction
    assert odd_dtype.modulus % 2 == 1

    # needed for security because of `r` masking
    assert x.backing_dtype.native_type == odd_dtype.native_type

    with tf.name_scope('lsb'):

        with tf.name_scope('blind'):

            # ask server2 to generate r mask and its bits
            with tf.device(prot.server_2.device_name):
                r0 = odd_dtype.sample_uniform(x.shape)
                r1 = odd_dtype.sample_uniform(x.shape)
                r = PondPrivateTensor(prot, r0, r1, False)

                # full mask value; known only on server 2's device
                r_raw = r0 + r1
                rbits_raw = r_raw.bits(factory=prime_dtype)
                rbits = prot._share_and_wrap(rbits_raw, False)

                # TODO[Morten] once .bits() is cached then call .lsb() here instead
                rlsb_raw = rbits_raw[..., 0].cast(out_dtype)
                rlsb = prot._share_and_wrap(rlsb_raw, False)

            # blind and reveal
            c = (x + r).reveal()
            c = prot.cast_backing(c, out_dtype)
            c.is_scaled = False

        with tf.name_scope('compare'):

            # ask either server0 and server1 to generate beta (distributing load)
            server = random.choice([prot.server_0, prot.server_1])
            with tf.device(server.device_name):
                beta_raw = prime_dtype.sample_bits(x.shape)
                beta = PondPublicTensor(prot,
                                        beta_raw,
                                        beta_raw,
                                        is_scaled=False)

            # privately compute (r > c) XOR beta using r's shared bits
            greater_xor_beta = _private_compare(prot, rbits, c, beta)
            clsb = prot.lsb(c)

        with tf.name_scope('unblind'):
            # strip the beta blinding, then undo the r masking of the lsb
            gamma = prot.bitwise_xor(greater_xor_beta,
                                     prot.cast_backing(beta, out_dtype))
            delta = prot.bitwise_xor(rlsb, clsb)
            alpha = prot.bitwise_xor(gamma, delta)

        assert alpha.backing_dtype is out_dtype
        return alpha