Example #1
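These snippets are pytest unit tests for a TensorFlow-to-MPC compiler (in the style of the Athos test suite): each builds a small TF 1.x graph, runs it natively to get a reference output, compiles the same graph for an MPC backend, and checks that the two agree. Every example assumes the imports below; the `tests.utils` module path is an assumption here, standing in for wherever the suite defines Config, Compiler, and assert_almost_equal.

import numpy as np
import pytest
import tensorflow as tf

# Assumed harness imports: Config, Compiler and assert_almost_equal come from
# the suite's shared test utilities; the module path is a guess.
from tests.utils import Config, Compiler, assert_almost_equal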
def test_matmul(test_dir, backend, a_shape, b_shape, transpose_a, transpose_b,
                bisModel, dtype):
    if backend == "2PC_HE":
        pytest.skip(
            "2PC_HE: FCField::matrix_multiplication fails with "
            "assertion `num_cols == 1'"
        )
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    b_inp = dtype(np.random.randn(*b_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        if bisModel:
            b = tf.constant(b_inp, name="b")
        else:
            b = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                         shape=b_inp.shape,
                                         name="b")
        output = tf.matmul(a, b, transpose_a, transpose_b, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        feed_dict = {a: a_inp}
        if not bisModel:
            feed_dict[b] = b_inp
        expected_output = sess.run(output, feed_dict=feed_dict)
    config = Config(backend).add_input(a).add_output(output)
    if not bisModel:
        config.add_input(b)
    config.config["scale"] = 12
    compiler = Compiler(graph, config, test_dir)
    # b is a runtime input whenever it is a placeholder, so feed b_inp too.
    inputs = [a_inp] if bisModel else [a_inp, b_inp]
    mpc_output = compiler.compile_and_run(inputs)
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
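For context, the fixture-style arguments (a_shape, b_shape, dtype, ...) are supplied by pytest parametrization; below is a hypothetical decorator stack for test_matmul, with illustrative values that are assumptions rather than the suite's actual parameters.

# Hypothetical parametrization; all values below are illustrative.
@pytest.mark.parametrize("dtype", [np.single])
@pytest.mark.parametrize("bisModel", [True, False])
@pytest.mark.parametrize(
    "a_shape,b_shape,transpose_a,transpose_b",
    [
        ([2, 3], [3, 4], False, False),  # plain (2x3) @ (3x4)
        ([3, 2], [3, 4], True, False),   # a is stored transposed
    ],
)
def test_matmul(test_dir, backend, a_shape, b_shape, transpose_a,
                transpose_b, bisModel, dtype):
    ...  # body as in Example #1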
Example #2
def test_conv_transpose(
    test_dir,
    backend,
    tfOp,
    a_shape,
    kernel_shape,
    output_shape,
    strides,
    padding,
    dtype,
):
    if backend in ["2PC_HE", "2PC_OT"]:
        pytest.skip("[conv3d] Missing Support in SCI")
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    kernel_inp = dtype(np.random.randn(*kernel_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
        filters = tf.constant(kernel_inp, name="filter")
        output = tfOp(a, filters, output_shape, strides, padding, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    config.config["scale"] = 12
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
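The output_shape argument has to agree with the input, kernel, and strides. For the 2-D case (tf.nn.conv2d_transpose) with padding="SAME", each spatial output dimension is simply input_size * stride; a quick illustrative check, with all values assumed for the sketch:

# Illustrative shape bookkeeping for tf.nn.conv2d_transpose (NHWC):
a_shape = [1, 4, 4, 2]       # batch, height, width, in_channels
kernel_shape = [3, 3, 5, 2]  # [h, w, out_channels, in_channels]
strides = [1, 2, 2, 1]
output_shape = [
    a_shape[0],
    a_shape[1] * strides[1],  # "SAME" padding: out = in * stride
    a_shape[2] * strides[2],
    kernel_shape[2],
]                             # -> [1, 8, 8, 5]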
Example #3
def test_fused_batch_norm(test_dir, backend, tfOp, a_shape, scale, offset,
                          mean, variance, dtype):
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        output = tfOp(
            x=a,
            scale=scale,
            offset=offset,
            mean=mean,
            variance=variance,
            is_training=False,
            name="output",
        )
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})
    assert expected_output is not None
    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
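With is_training=False, fused batch norm just normalizes with the supplied statistics. A plain-NumPy reference for the value the test expects (TensorFlow's default epsilon for this op is 0.001):

# NumPy reference: y = scale * (x - mean) / sqrt(variance + epsilon) + offset
def fused_batch_norm_reference(x, scale, offset, mean, variance, epsilon=1e-3):
    return scale * (x - mean) / np.sqrt(variance + epsilon) + offset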
Example #4
def test_pool(test_dir, backend, tfOp, a_shape, ksize, strides, padding,
              data_format, dtype):
    if backend.startswith("2PC") and tfOp == tf.nn.max_pool:
        pytest.skip("[SCI][maxpool] Output mismatch bug")
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        output = tfOp(
            a,
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format=data_format,
            name="output",
        )
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
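The expected spatial output size follows TensorFlow's padding rules, which is worth keeping in mind when picking ksize and strides; a small reference helper:

import math

# Spatial output size of a pooling window under TensorFlow's padding rules.
def pool_output_size(input_size, ksize, stride, padding):
    if padding == "SAME":
        return math.ceil(input_size / stride)
    if padding == "VALID":
        return math.ceil((input_size - ksize + 1) / stride)
    raise ValueError("unknown padding: " + padding)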
Example #5
def test_fill(test_dir, backend, a_shape, value):
    graph = tf.Graph()
    with graph.as_default():
        output = tf.fill(a_shape, value)
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output)

    config = Config(backend).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([], timeoutSeconds=60)
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
Example #6
def test_non_linear(test_dir, backend, tfOp, a_shape, dtype):
    if backend not in ["2PC_OT", "CPP"] and tfOp in [
            tf.math.sqrt,
            tf.math.rsqrt,
            tf.math.sigmoid,
            tf.math.tanh,
    ]:
        pytest.skip(
            "Operation {op} not supported for backend {backend}".format(
                op=tfOp.__name__, backend=backend))
    if a_shape == []:
        pytest.skip(
            "[Athos] Missing Support for tan/sig/sqrt/relu of scalar (0-d) variables"
        )

    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    if tfOp in [tf.math.sqrt, tf.math.rsqrt]:
        # sqrt/rsqrt are undefined for negative inputs, so take magnitudes.
        a_inp = np.abs(a_inp)

    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        output = tfOp(a, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})
    assert expected_output is not None
    config = Config(backend).add_input(a).add_output(output)
    config.config["scale"] = 12
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
Example #7
def test_div(test_dir, backend, tfOp, a_val, divisor, dtype):
    graph = tf.Graph()
    a_inp = np.array(a_val)
    with graph.as_default():
        b = tf.constant(divisor, name="b")
        a = tf.compat.v1.placeholder(tf.as_dtype(b.dtype), shape=a_inp.shape, name="a")
        output = tfOp(a, b, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
Example #8
def test_bias_add(test_dir, backend, a_shape, b_shape, data_format, dtype):
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    b_inp = dtype(np.random.randn(*b_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
        b = tf.constant(b_inp, name="b")
        output = tf.nn.bias_add(value=a, bias=b, data_format=data_format, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
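tf.nn.bias_add requires the bias length to match the channel dimension, and data_format selects which axis that is; two illustrative valid pairings:

# The bias length must equal the size of the channel axis:
a_shape, b_shape = [1, 4, 4, 3], [3]  # data_format="NHWC": channels last
a_shape, b_shape = [1, 3, 4, 4], [3]  # data_format="NCHW": channels at axis 1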
Example #9
def test_softmax(test_dir, backend, a_shape, axis, dtype):
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        output = tf.nn.softmax(a, axis=axis, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})
    assert expected_output is not None
    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
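A plain-NumPy reference for the expected softmax values along a given axis; subtracting the per-axis maximum is the usual numerical-stability trick and leaves the result unchanged:

# NumPy reference for softmax along `axis`.
def softmax_reference(x, axis=-1):
    e = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e / np.sum(e, axis=axis, keepdims=True)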
Example #10
def test_uop(test_dir, backend, tfOp, a_shape, dtype):
    if backend.startswith("2PC") and tfOp == tf.math.square:
        pytest.skip("[SCI][square] Secret Secret mul not implemented")
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        output = tfOp(a, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
Example #11
def test_conv(test_dir, backend, tfOp, a_shape, kernel_shape, strides, padding,
              dtype):
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    kernel_inp = dtype(np.random.randn(*kernel_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        filters = tf.constant(kernel_inp, name="filter")
        output = tfOp(a, filters, strides, padding, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
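For tf.nn.conv2d the filter layout is [height, width, in_channels, out_channels] and strides carries one entry per input dimension; an illustrative, mutually consistent parameter set (values assumed for the sketch):

# Illustrative conv2d parameters (NHWC):
a_shape = [1, 8, 8, 3]        # batch, height, width, in_channels
kernel_shape = [3, 3, 3, 16]  # [h, w, in_channels, out_channels]
strides = [1, 1, 1, 1]        # one stride per input dimension
padding = "SAME"              # output shape stays [1, 8, 8, 16]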
Example #12
def test_pad(test_dir, backend, a_shape, paddings, mode, constant_values,
             dtype):
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        pad = tf.constant(paddings, name="paddings")
        output = tf.pad(a,
                        pad,
                        mode=mode,
                        constant_values=constant_values,
                        name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
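tf.pad expects paddings of shape [rank, 2], one (before, after) pair per dimension; an illustrative shape calculation:

# paddings has shape [rank(a), 2]: one (before, after) pair per dimension.
a_shape = [2, 3]
paddings = [[1, 1], [0, 2]]  # rows: +1 on each side; columns: +2 after
# padded shape: [2 + 1 + 1, 3 + 0 + 2] = [4, 5]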
Example #13
def test_split(test_dir, backend, a_shape, num_or_size_splits, axis, dtype):
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        output = tf.split(a, num_or_size_splits, axis, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    if isinstance(output, list):
        tf_output = output[-1]
        tf_expected_output = expected_output[-1]
    else:
        tf_output = output
        tf_expected_output = expected_output
    config = Config(backend).add_input(a).add_output(tf_output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=tf_expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
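tf.split returns a list of tensors, which is why the test wires only the last piece into the MPC config. num_or_size_splits accepts either an integer (equal parts) or explicit sizes; illustrative values:

# num_or_size_splits: an int gives equal pieces, a list gives explicit sizes.
a_shape = [6, 4]
num_or_size_splits = 3          # three [2, 4] tensors along axis 0
num_or_size_splits = [1, 2, 3]  # shapes [1, 4], [2, 4], [3, 4] along axis 0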