Example #1
def test_depthwise_conv(
    test_dir, backend, tfOp, a_shape, kernel_shape, strides, padding, dtype
):
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    kernel_inp = dtype(np.random.randn(*kernel_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
        filters = tf.constant(kernel_inp, name="filter")
        output = tfOp(a, filters, strides, padding, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    config.config["scale"] = 12
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
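
These tests take their fixtures (test_dir, backend) from a conftest and their op and shape arguments from pytest parametrization. A minimal sketch of how test_depthwise_conv might be parametrized is shown below; the op, shapes, strides, and padding are illustrative assumptions, not values from the original suite.

import numpy as np
import pytest
import tensorflow as tf

# Hypothetical parametrization for the test above; shapes follow the NHWC
# input / [h, w, in_channels, channel_multiplier] filter layout expected by
# tf.nn.depthwise_conv2d and are illustrative only.
@pytest.mark.parametrize("dtype", [np.float32])
@pytest.mark.parametrize("tfOp", [tf.nn.depthwise_conv2d])
@pytest.mark.parametrize(
    "a_shape, kernel_shape, strides, padding",
    [
        ([1, 5, 5, 3], [2, 2, 3, 1], [1, 1, 1, 1], "VALID"),
        ([1, 5, 5, 3], [3, 3, 3, 2], [1, 1, 1, 1], "SAME"),
    ],
)
def test_depthwise_conv(
    test_dir, backend, tfOp, a_shape, kernel_shape, strides, padding, dtype
):
    ...  # body as in the example above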
Example #2
def test_uop(test_dir, backend, tfOp, a_shape, dtype):
    if backend.startswith("2PC") and tfOp == tf.math.square:
        pytest.skip("[SCI][square] Secret Secret mul not implemented")
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        output = tfOp(a, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
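
The unary-op test follows the same pattern. A hedged sketch of a possible parametrization, assuming the test_dir and backend fixtures come from a conftest; the op list and shapes are illustrative assumptions.

import numpy as np
import pytest
import tensorflow as tf

# Illustrative unary ops and shapes; each op accepts a single tensor plus a
# name keyword, matching the tfOp(a, name="output") call in the test.
@pytest.mark.parametrize("dtype", [np.float32])
@pytest.mark.parametrize("tfOp", [tf.identity, tf.math.negative, tf.math.square])
@pytest.mark.parametrize("a_shape", [[2, 3], [4, 4, 2]])
def test_uop(test_dir, backend, tfOp, a_shape, dtype):
    ...  # body as in the example above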
Example #3
def test_arith_binop(test_dir, backend, tfOp, a_shape, b_shape, dtype):
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    b_inp = dtype(np.random.randn(*b_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        b = tf.constant(b_inp, name="b")
        output = tfOp(x=a, y=b, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = TFConfig(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(model_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
Example #4
def test_equal(test_dir, backend, a, b, dtype):
    graph = tf.Graph()
    a_inp = dtype(np.array(a))
    b_inp = dtype(np.array(b))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        b = tf.constant(b_inp, name="b")
        output = tf.math.equal(a, b, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
Example #5
def test_matmul(test_dir, backend, a_shape, b_shape, bisModel, dtype):
    if backend == "2PC_HE" and a_shape[0] != 1:
        pytest.skip("HE only supports vector matrix multiplication")
    Op = "MatMul"
    a = np.random.randn(*a_shape).astype(dtype)
    b = np.random.randn(*b_shape).astype(dtype)
    out = np.matmul(a, b)
    node = onnx.helper.make_node(
        Op,
        inputs=["a", "b"],
        outputs=["out"],
    )
    if not bisModel:
        graph = make_onnx_graph(
            node,
            inputs=[a, b],
            outputs=[out],
            name=Op + "_test",
        )
        expected_output = run_onnx(graph, [a, b])
    else:
        graph = make_onnx_graph(
            node,
            inputs=[a],
            outputs=[out],
            tensors=[b],
            tensor_names=["b"],
            name=Op + "_test",
        )
        expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    if not bisModel:
        mpc_output = compiler.compile_and_run([a, b])
    else:
        mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(model_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
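
bisModel decides whether b is baked into the ONNX graph as a weight tensor or fed as a second runtime input, and the 2PC_HE backend only accepts vector-matrix products (a_shape[0] == 1). A sketch of a matching parametrization with illustrative shapes:

import numpy as np
import pytest

# Illustrative shapes; the (1, k) x (k, n) case stays HE-compatible, the
# (2, k) case is skipped on 2PC_HE by the guard in the test.
@pytest.mark.parametrize("dtype", [np.float32])
@pytest.mark.parametrize("bisModel", [True, False])
@pytest.mark.parametrize(
    "a_shape, b_shape",
    [
        ((1, 3), (3, 4)),
        ((2, 3), (3, 4)),
    ],
)
def test_matmul(test_dir, backend, a_shape, b_shape, bisModel, dtype):
    ...  # body as in the example above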
Example #6
def test_split(test_dir, backend, a_shape, num_or_size_splits, axis, dtype):
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype), shape=a_inp.shape, name="a")
        output = tf.split(a, num_or_size_splits, axis, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    if isinstance(output, list):
        tf_output = output[-1]
        tf_expected_output = expected_output[-1]
    else:
        tf_output = output
        tf_expected_output = expected_output
    config = TFConfig(backend).add_input(a).add_output(tf_output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(
        model_output=tf_expected_output, mpc_tensor=mpc_output, precision=2
    )
    return
Example #7
def test_conv_transpose(
    test_dir,
    backend,
    tfOp,
    a_shape,
    kernel_shape,
    output_shape,
    strides,
    padding,
    dtype,
):
    if backend in ["2PC_HE", "2PC_OT"]:
        pytest.skip("[conv3d] Missing Support in SCI")
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    kernel_inp = dtype(np.random.randn(*kernel_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        filters = tf.constant(kernel_inp, name="filter")
        output = tfOp(a,
                      filters,
                      output_shape,
                      strides,
                      padding,
                      name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = TFConfig(backend).add_input(a).add_output(output)
    config.config["scale"] = 12
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(model_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
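
For transposed convolution the caller must also supply an output_shape consistent with the input, filter, strides, and padding. A minimal sketch, assuming tf.nn.conv2d_transpose with "SAME" padding and unit strides so the spatial size is preserved; all values are illustrative.

import numpy as np
import pytest
import tensorflow as tf

# Filter layout for conv2d_transpose is [h, w, out_channels, in_channels];
# with unit strides and SAME padding the output keeps the 5x5 spatial size.
# These concrete shapes are assumptions for illustration only.
@pytest.mark.parametrize("dtype", [np.float32])
@pytest.mark.parametrize("tfOp", [tf.nn.conv2d_transpose])
@pytest.mark.parametrize(
    "a_shape, kernel_shape, output_shape, strides, padding",
    [([1, 5, 5, 2], [3, 3, 3, 2], [1, 5, 5, 3], [1, 1, 1, 1], "SAME")],
)
def test_conv_transpose(
    test_dir, backend, tfOp, a_shape, kernel_shape, output_shape, strides, padding, dtype
):
    ...  # body as in the example above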
Example #8
def test_non_linear(test_dir, backend, tfOp, a_shape, dtype):
    if backend not in ["2PC_OT", "CPP"] and tfOp in [
            tf.math.sqrt,
            tf.math.rsqrt,
            tf.math.sigmoid,
            tf.math.tanh,
    ]:
        pytest.skip(
            "Operation {op} not supported for backend {backend}".format(
                op=tfOp.__name__, backend=backend))
    if a_shape == []:
        pytest.skip(
            "[Athos] Missing Support for tan/sig/sqrt/relu of scalar (0-d) variables"
        )

    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    if tfOp in [tf.math.sqrt, tf.math.rsqrt]:
        a_inp = np.abs(a_inp)

    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        output = tfOp(a, name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})
    assert expected_output is not None
    config = TFConfig(backend).add_input(a).add_output(output)
    config.config["scale"] = 12
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(model_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
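
The op set here mirrors the backend guards, and sqrt/rsqrt inputs are forced non-negative before the graph runs. A hedged parametrization sketch; the ops are the ones named in the guards above, while the shapes (including the skipped scalar case) are illustrative.

import numpy as np
import pytest
import tensorflow as tf

# Ops taken from the guards in the test; shapes are illustrative assumptions.
@pytest.mark.parametrize("dtype", [np.float32])
@pytest.mark.parametrize(
    "tfOp", [tf.math.sqrt, tf.math.rsqrt, tf.math.sigmoid, tf.math.tanh]
)
@pytest.mark.parametrize("a_shape", [[2, 3], []])
def test_non_linear(test_dir, backend, tfOp, a_shape, dtype):
    ...  # body as in the example above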
Example #9
def test_pad(test_dir, backend, a_shape, paddings, mode, constant_values,
             dtype):
    graph = tf.Graph()
    a_inp = dtype(np.random.randn(*a_shape))
    with graph.as_default():
        a = tf.compat.v1.placeholder(tf.as_dtype(dtype),
                                     shape=a_inp.shape,
                                     name="a")
        pad = tf.constant(paddings, name="paddings")
        output = tf.pad(a,
                        pad,
                        mode=mode,
                        constant_values=constant_values,
                        name="output")
    with tf.compat.v1.Session(graph=graph) as sess:
        expected_output = sess.run(output, feed_dict={a: a_inp})

    config = Config(backend).add_input(a).add_output(output)
    compiler = Compiler(graph, config, test_dir)
    mpc_output = compiler.compile_and_run([a_inp])
    assert_almost_equal(tf_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
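
tf.pad takes a [rank, 2] paddings tensor plus a mode; constant_values only matters for CONSTANT mode, and REFLECT paddings must stay below the corresponding dimension size. A minimal sketch of a possible parametrization; the values are illustrative assumptions.

import numpy as np
import pytest

# One [lo, hi] paddings pair per input dimension; values are illustrative.
@pytest.mark.parametrize("dtype", [np.float32])
@pytest.mark.parametrize(
    "a_shape, paddings, mode, constant_values",
    [
        ([2, 3], [[1, 1], [2, 2]], "CONSTANT", 0.0),
        ([2, 3], [[1, 0], [0, 1]], "REFLECT", 0.0),
    ],
)
def test_pad(test_dir, backend, a_shape, paddings, mode, constant_values, dtype):
    ...  # body as in the example above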
Example #10
def test_convtranspose(
    test_dir,
    backend,
    a_shape,
    kernel_shape,
    pads,
    strides,
    output_shape,
    output_padding,
    dtype,
):
    Op = "ConvTranspose"
    if len(a_shape) == 4:
        version = 2  # 2d
    elif len(a_shape) == 5:
        version = 3  # 3d

    if version == 3 and backend in ["2PC_HE", "2PC_OT"]:
        pytest.skip("[conv3dtranspose] Missing Support in SCI")

    a = np.random.randn(*a_shape).astype(dtype)
    kernel = np.random.randn(*kernel_shape).astype(dtype)

    # Only need this for its shape
    out = np.zeros(output_shape).astype(dtype)

    hw_kernel_shape = kernel_shape[-version:]
    if not output_padding:
        node = onnx.helper.make_node(
            Op,
            inputs=["a", "kernel"],
            outputs=["output"],
            kernel_shape=hw_kernel_shape,
            pads=pads,
            strides=strides
            # Default values for other attributes: dilations=[1, 1], groups=1
        )
    else:
        node = onnx.helper.make_node(
            Op,
            inputs=["a", "kernel"],
            outputs=["output"],
            kernel_shape=hw_kernel_shape,
            pads=pads,
            strides=strides,
            output_padding=[1, 1]
            # Default values for other attributes: dilations=[1, 1], groups=1
        )

    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[kernel],
        tensor_names=["kernel"],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    config.config["scale"] = 12
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(model_output=expected_output,
                        mpc_tensor=mpc_output,
                        precision=2)
    return
Example #11
def test_cast(test_dir, backend, from_type, to_type, compile_time):
    Op = "Cast"
    shape = (3, 4)
    if "STRING" != from_type:
        input = np.random.random_sample(shape).astype(
            TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, from_type)]
        )
        if "STRING" == to_type:
            # Convert the input to str, then give it object dtype for script generation
            ss = []
            for i in input.flatten():
                s = str(i).encode("utf-8")
                su = s.decode("utf-8")
                ss.append(su)

            output = np.array(ss).astype(object).reshape([3, 4])
        else:
            output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])
    else:
        input = np.array(
            [
                "0.47892547",
                "0.48033667",
                "0.49968487",
                "0.81910545",
                "0.47031248",
                "0.816468",
                "0.21087195",
                "0.7229038",
                "NaN",
                "INF",
                "+INF",
                "-INF",
            ],
            dtype=np.dtype(object),
        ).reshape([3, 4])
        output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])
    node = onnx.helper.make_node(
        Op,
        inputs=["input"],
        outputs=["output"],
        to=getattr(TensorProto, to_type),
    )
    if compile_time:
        graph = make_onnx_graph(
            node,
            inputs=[],
            outputs=[output],
            tensors=[input],
            tensor_names=["input"],
            name=Op + "_test",
        )
        expected_output = run_onnx(graph, [])
    else:
        graph = make_onnx_graph(
            node,
            inputs=[input],
            outputs=[output],
            tensors=[],
            tensor_names=[],
            name=Op + "_test",
        )
        expected_output = run_onnx(graph, [input])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    if compile_time:
        mpc_output = compiler.compile_and_run([])
    else:
        mpc_output = compiler.compile_and_run([input])

    assert_almost_equal(
        model_output=expected_output, mpc_tensor=mpc_output, precision=2
    )
    return
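
The cast pairs and the compile_time switch (constant folded into the graph versus fed at run time) would likewise be parametrized. A minimal sketch, assuming standard TensorProto type names; the pairs chosen are illustrative, not the original suite's coverage.

import pytest

# Type names must be valid TensorProto enum members; the pairs are assumptions.
@pytest.mark.parametrize("compile_time", [True, False])
@pytest.mark.parametrize(
    "from_type, to_type",
    [("FLOAT", "DOUBLE"), ("DOUBLE", "FLOAT"), ("FLOAT", "FLOAT16")],
)
def test_cast(test_dir, backend, from_type, to_type, compile_time):
    ...  # body as in the example above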