Example no. 1
def test_callback():
    import numpy as np
    import tensorflow as tf
    from dace.frontend.tensorflow import TFSession
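    # Build conv3d, sigmoid, reduce_max, and top_k ops and check that the
    # DaCe TFSession matches the results of a native tf.Session.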

    input_image = tf.constant(0.69, tf.float64, [2, 2, 5, 5, 2])
    conv_filter = tf.constant(0.01, tf.float64, [1, 1, 1, 2, 2])
    tests = []
    tests.append(
        tf.nn.conv3d(input_image,
                     conv_filter,
                     strides=[1, 1, 1, 1, 1],
                     padding="VALID"))

    myinput = tf.constant(0.69, tf.float64, [2, 2])
    tests.append(tf.sigmoid(myinput))

    myinput = np.random.rand(2, 3, 4).astype(np.float64)
    tests.append(tf.reduce_max(myinput))

    myinput = np.random.rand(10).astype(np.float64)
    tests.append(tf.nn.top_k(myinput, 4)[0])
    tests.append(tf.nn.top_k(myinput, 4)[1])

    sess_tf = tf.Session()
    sess_dace = TFSession()

    for test in tests:
        output_tf = sess_tf.run(test)
        output_dace = sess_dace.run(test)
        print(output_dace)
        print(output_tf)
        assert np.linalg.norm(output_dace - output_tf) < 1e-8
Example no. 2
def test_mean():
    import numpy as np
    import tensorflow as tf
    from dace.frontend.tensorflow import TFSession
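    # Check tf.reduce_mean over several axis combinations, with and without
    # keepdims, by comparing TFSession output against tf.Session.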
    shape = [10, 11, 12, 13]

    inp = tf.placeholder(tf.float64, shape)
    outp_1 = tf.reduce_mean(inp, keepdims=True)
    outp_3 = tf.reduce_mean(inp, axis=[0, 2], keepdims=True)
    outp_0 = tf.reduce_mean(inp, axis=[0, 2])
    outp_2 = tf.reduce_mean(inp, axis=[-2, -1])
    outp_4 = tf.reduce_mean(inp, axis=[0, -1], keepdims=True)

    sess_tf = tf.Session()
    sess_dace = TFSession()
    real_inp = np.random.rand(*shape)
    for index, op in enumerate([outp_0, outp_1, outp_2, outp_3, outp_4]):
        output_tf = sess_tf.run(op, feed_dict={inp: real_inp})
        output_dace = sess_dace.run(op, feed_dict={inp: real_inp})
        try:
            assert tf.norm(output_dace -
                           output_tf).eval(session=sess_tf) < 1e-10
        except Exception:
            print(output_dace)
            print(output_tf)
            print(tf.norm(output_dace - output_tf).eval(session=sess_tf))
            raise AssertionError("mean test {i} failed".format(i=index))

    print("mean tests passed!")
Example no. 3
def test_shapen():
    import tensorflow as tf
    from dace.frontend.tensorflow import TFSession
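    # tf.shape_n returns the shapes of a list of tensors; check that the
    # DaCe frontend produces the same shape arrays as TensorFlow.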
    myshape = [69, 96, 666]
    num_inputs = 5

    inpList = [tf.ones(myshape) for _ in range(num_inputs)]

    sess_tf = tf.Session()
    sess_dace = TFSession()

    shapes_tf = sess_tf.run(tf.shape_n(inpList))
    shapes_dace = sess_dace.run(tf.shape_n(inpList))
    for shape_dace, shape_tf in zip(shapes_dace, shapes_tf):
        try:
            assert (shape_dace == shape_tf).all()
        except AssertionError:
            print(shape_dace)
            print(shape_tf)
Example no. 4
def test_addn():
    import numpy as np
    import tensorflow as tf
    from dace.frontend.tensorflow import TFSession
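    # tf.add_n sums a list of tensors element-wise; compare the DaCe
    # result against native TensorFlow.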
    shape = [10, 11, 12, 13]
    inputs = [np.random.rand(*shape) for _ in range(10)]
    addn_test_0 = tf.add_n(inputs)

    sess_tf = tf.Session()
    sess_dace = TFSession()

    output_tf = sess_tf.run(addn_test_0)
    output_dace = sess_dace.run(addn_test_0)
    try:
        assert tf.norm(output_dace - output_tf).eval(session=sess_tf) < 1e-10
    except Exception:
        print(output_dace)
        print(output_tf)
        print(tf.norm(output_dace - output_tf).eval(session=sess_tf))
        raise AssertionError("AddN test failed")
    print("AddN test passed!")
Example no. 5
def test_slice():
    import tensorflow as tf
    from dace.frontend.tensorflow import TFSession
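    # Slice a constant 3x2x3 tensor with tf.slice and compare the DaCe
    # TFSession output with native TensorFlow.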
    t = tf.placeholder(tf.int32, [3, 2, 3])
    b = tf.placeholder(tf.int32, [3])
    s = tf.placeholder(tf.int32, [3])
    output = tf.slice(t, b, s)
    input_tensor = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
                                [[5, 5, 5], [6, 6, 6]]])

    sess_tf = tf.Session()
    sess_dace = TFSession()

    begin_tensor = tf.constant([1, 0, 0])
    size_tensor_1 = tf.constant([1, 2, 2])
    size_tensor_2 = tf.constant([1, 2, 3])
    size_tensor_3 = tf.constant([2, 1, 3])
    tf_out = sess_tf.run(tf.slice(input_tensor, begin_tensor, size_tensor_3))
    dace_out = sess_dace.run(
        tf.slice(input_tensor, begin_tensor, size_tensor_3))
    print(tf_out)
    print(dace_out)
    assert (tf_out == dace_out).all()
Example no. 6
def test_simple():
    import numpy as np
    import tensorflow as tf
    from dace.frontend.tensorflow import TFSession
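    # Run a 16x16 matrix multiplication through the DaCe TFSession and
    # compare against the NumPy result.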
    print('DaCe Tensorflow frontend test')

    A = np.random.rand(16, 16).astype(np.float32)
    B = np.random.rand(16, 16).astype(np.float32)

    A_tf = tf.placeholder(tf.float32, shape=[16, 16])
    B_tf = tf.placeholder(tf.float32, shape=[16, 16])

    with TFSession() as sess:
        # Simple matrix multiplication
        C = sess.run(A_tf @ B_tf, feed_dict={A_tf: A, B_tf: B})

    diff = np.linalg.norm(C - (A @ B)) / (16 * 16)
    print("Difference:", diff)
    print("==== Program end ====")
    assert diff <= 1e-5
Example no. 7
try:
    import tensorflow as tf
except ImportError:
    print("WARNING: Tensorflow not found, skipping test")
    exit(0)

from dace.frontend.tensorflow import TFSession
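# Compare a tf.slice result between a native tf.Session and the DaCe TFSession.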

if __name__ == '__main__':
    t = tf.placeholder(tf.int32, [3, 2, 3])
    b = tf.placeholder(tf.int32, [3])
    s = tf.placeholder(tf.int32, [3])
    output = tf.slice(t, b, s)
    input_tensor = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]],
                                [[5, 5, 5], [6, 6, 6]]])

    sess_tf = tf.Session()
    sess_dace = TFSession()

    begin_tensor = tf.constant([1, 0, 0])
    size_tensor_1 = tf.constant([1, 2, 2])
    size_tensor_2 = tf.constant([1, 2, 3])
    size_tensor_3 = tf.constant([2, 1, 3])
    tf_out = sess_tf.run(tf.slice(input_tensor, begin_tensor, size_tensor_3))
    dace_out = sess_dace.run(
        tf.slice(input_tensor, begin_tensor, size_tensor_3))
    print(tf_out)
    print(dace_out)
    assert (tf_out == dace_out).all()
Example no. 8
def test_pooling():
    import numpy as np
    import tensorflow as tf
    from dace.frontend.tensorflow import TFSession
    size_in = [1, 112, 112, 3]
    np.random.seed(0)
    input_tensor = np.random.uniform(size=size_in).astype(np.float32)
    input_placeholder = tf.placeholder(tf.float32, size_in)
    ksize = [1, 3, 3, 1]
    stride = [1, 2, 2, 1]
    # need to fix bug in padding SAME
    max_pool_outp = tf.nn.max_pool(input_placeholder,
                                   ksize,
                                   stride,
                                   "VALID",
                                   data_format="NHWC")
    avg_pool_outp = tf.nn.avg_pool(input_placeholder,
                                   ksize,
                                   stride,
                                   "VALID",
                                   data_format="NHWC")
    sess_tf = tf.Session()
    sess_dace = TFSession()
    # MAX pool test
    tf_output = sess_tf.run(max_pool_outp,
                            feed_dict={input_placeholder: input_tensor})
    dace_output = sess_dace.run(max_pool_outp,
                                feed_dict={input_placeholder: input_tensor})
    try:
        assert tf.norm(dace_output - tf_output).eval(session=sess_tf) < 1e-10
    except Exception:
        print(dace_output.shape)
        print(tf_output.shape)
        print(tf.norm(dace_output - tf_output).eval(session=sess_tf))
        raise AssertionError("max pool test failed")
    print("Max pool test passed")

    # AVG pool test
Example no. 9
import numpy as np
import tensorflow as tf
from dace.frontend.tensorflow import TFSession

if __name__ == '__main__':
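    # Compare conv3d, sigmoid, reduce_max, and top_k results between
    # tf.Session and the DaCe TFSession.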
    input_image = tf.constant(0.69, tf.float64, [2, 2, 5, 5, 2])
    conv_filter = tf.constant(0.01, tf.float64, [1, 1, 1, 2, 2])
    tests = []
    tests.append(
        tf.nn.conv3d(input_image,
                     conv_filter,
                     strides=[1, 1, 1, 1, 1],
                     padding="VALID"))

    myinput = tf.constant(0.69, tf.float64, [2, 2])
    tests.append(tf.sigmoid(myinput))

    myinput = np.random.rand(2, 3, 4).astype(np.float64)
    tests.append(tf.reduce_max(myinput))

    myinput = np.random.rand(10).astype(np.float64)
    tests.append(tf.nn.top_k(myinput, 4)[0])
    tests.append(tf.nn.top_k(myinput, 4)[1])

    sess_tf = tf.Session()
    sess_dace = TFSession()

    for test in tests:
        output_tf = sess_tf.run(test)
        output_dace = sess_dace.run(test)
        print(output_dace)
        print(output_tf)
        assert np.linalg.norm(output_dace - output_tf) < 1e-8
Example no. 10
def test_pooling():
    import numpy as np
    import tensorflow as tf
    from dace.frontend.tensorflow import TFSession
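    # Forward and gradient checks for max_pool and avg_pool (3x3 window,
    # stride 2, VALID padding) against native TensorFlow.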
    size_in = [1, 112, 112, 3]
    # size_in = [4, 4, 4, 4]
    np.random.seed(0)
    input_tensor = np.random.uniform(size=size_in).astype(np.float32)
    input_placeholder = tf.placeholder(tf.float32, size_in)
    ksize = [1, 3, 3, 1]
    stride = [1, 2, 2, 1]
    # need to fix bug in padding SAME
    max_pool_outp = tf.nn.max_pool(input_placeholder,
                                   ksize,
                                   stride,
                                   "VALID",
                                   data_format="NHWC")
    avg_pool_outp = tf.nn.avg_pool(input_placeholder,
                                   ksize,
                                   stride,
                                   "VALID",
                                   data_format="NHWC")
    sess_tf = tf.Session()
    sess_dace = TFSession()
    # MAX pool test
    tf_output = sess_tf.run(max_pool_outp,
                            feed_dict={input_placeholder: input_tensor})
    dace_output = sess_dace.run(max_pool_outp,
                                feed_dict={input_placeholder: input_tensor})
    try:
        assert tf.norm(dace_output - tf_output).eval(session=sess_tf) < 1e-10
    except Exception:
        print(dace_output.shape)
        print(tf_output.shape)
        print(tf.norm(dace_output - tf_output).eval(session=sess_tf))
        raise AssertionError("max pool test failed")
    print("Max pool test passed")

    # AVG pool test
    tf_output = sess_tf.run(avg_pool_outp,
                            feed_dict={input_placeholder: input_tensor})
    dace_output = sess_dace.run(avg_pool_outp,
                                feed_dict={input_placeholder: input_tensor})
    try:
        assert tf.norm(dace_output - tf_output).eval(session=sess_tf) < 1e-5
    except Exception:
        print(dace_output.shape)
        print(tf_output.shape)
        print(tf.norm(dace_output - tf_output).eval(session=sess_tf))
        raise AssertionError("avg pool test failed")
    print("Average pool test passed")

    # AVG pool gradient test
    np.random.seed(0)
    loss_placeholder = tf.placeholder(tf.float32, avg_pool_outp.shape)
    loss_tensor = np.random.uniform(size=avg_pool_outp.shape).astype(
        np.float32)
    grads_avg = tf.gradients(avg_pool_outp,
                             input_placeholder,
                             grad_ys=loss_placeholder)
    dace_output = sess_dace.run(grads_avg,
                                feed_dict={loss_placeholder: loss_tensor})
    tf_output = sess_tf.run(grads_avg,
                            feed_dict={loss_placeholder: loss_tensor})
    try:
        assert tf.norm(dace_output[0] -
                       tf_output[0]).eval(session=sess_tf) < 1e-5
    except Exception:
        print(dace_output)
        print(tf_output)
        print(tf.norm(dace_output[0] - tf_output[0]).eval(session=sess_tf))
        raise AssertionError("avg pool gradient test failed")

    # Max pool gradient test
    loss_placeholder = tf.placeholder(tf.float32, max_pool_outp.shape)
    np.random.seed(0)
    loss_tensor = np.random.uniform(size=max_pool_outp.shape).astype(
        np.float32)
    grads_max = tf.gradients(max_pool_outp,
                             input_placeholder,
                             grad_ys=loss_placeholder)
    dace_output = sess_dace.run(
        grads_max,
        feed_dict={
            input_placeholder: input_tensor,
            loss_placeholder: loss_tensor
        },
    )
    tf_output = sess_tf.run(
        grads_max,
        feed_dict={
            input_placeholder: input_tensor,
            loss_placeholder: loss_tensor
        },
    )
    try:
        assert tf.norm(dace_output[0] -
                       tf_output[0]).eval(session=sess_tf) < 1e-5
    except Exception:
        print(dace_output)
        print(tf_output)
        print(tf.norm(dace_output[0] - tf_output[0]).eval(session=sess_tf))
        raise AssertionError("max pool gradient test failed")
Example no. 11
import numpy as np
import tensorflow as tf
import dace
from dace.frontend.tensorflow import TFSession
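# Check tf.reduce_mean over several axis combinations, with and without
# keepdims, against a native tf.Session.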

if __name__ == '__main__':
    shape = [10, 11, 12, 13]

    inp = tf.placeholder(tf.float64, shape)
    outp_1 = tf.reduce_mean(inp, keepdims=True)
    outp_3 = tf.reduce_mean(inp, axis=[0, 2], keepdims=True)
    outp_0 = tf.reduce_mean(inp, axis=[0, 2])
    outp_2 = tf.reduce_mean(inp, axis=[-2, -1])
    outp_4 = tf.reduce_mean(inp, axis=[0, -1], keepdims=True)

    sess_tf = tf.Session()
    sess_dace = TFSession()
    real_inp = np.random.rand(*shape)
    for index, op in enumerate([outp_0, outp_1, outp_2, outp_3, outp_4]):
        output_tf = sess_tf.run(op, feed_dict={inp: real_inp})
        output_dace = sess_dace.run(op, feed_dict={inp: real_inp})
        try:
            assert tf.norm(output_dace -
                           output_tf).eval(session=sess_tf) < 1e-10
        except Exception:
            print(output_dace)
            print(output_tf)
            print(tf.norm(output_dace - output_tf).eval(session=sess_tf))
            raise AssertionError("mean test {i} failed".format(i=index))

    print("mean tests passed!")
Example no. 12
import numpy as np
import tensorflow as tf
import dace
from dace.frontend.tensorflow import TFSession
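# Compare a strided conv2d with SAME padding between the DaCe TFSession
# and native TensorFlow.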

inp_shape = [10, 10, 10, 10]
filter_shape = [3, 3, 10, 3]
strides = [1, 3, 3, 1]

inp = tf.placeholder(tf.float64, inp_shape)
filter = tf.placeholder(tf.float64, filter_shape)
outp = tf.nn.conv2d(inp, filter, strides, padding="SAME", data_format="NHWC")

test_in = np.random.uniform(size=tuple(inp_shape)).astype(np.float64)
test_filter = np.random.uniform(size=tuple(filter_shape)).astype(np.float64)

sess_dace = TFSession()
sess_tf = tf.Session()

output_dace = sess_dace.run(outp,
                            feed_dict={
                                inp: test_in,
                                filter: test_filter
                            })
output_tf = sess_tf.run(outp, feed_dict={inp: test_in, filter: test_filter})
try:
    assert tf.norm(output_dace - output_tf).eval(session=sess_tf) < 1e-10
except Exception:
    print(output_tf)
    print(output_dace)
    print(tf.linalg.norm(output_tf - output_dace).eval(session=sess_tf))
    raise AssertionError("Convolution test failed")
Example no. 13
try:
    import tensorflow as tf
except ImportError:
    print("WARNING: Tensorflow not found, skipping test")
    exit(0)

from dace.frontend.tensorflow import TFSession
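# Verify that tf.shape_n on a list of tensors gives the same shapes through
# the DaCe TFSession as through TensorFlow.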

myshape = [69, 96, 666]
num_inputs = 5

inpList = [tf.ones(myshape) for _ in range(num_inputs)]

sess_tf = tf.Session()
sess_dace = TFSession()

shapes_tf = sess_tf.run(tf.shape_n(inpList))
shapes_dace = sess_dace.run(tf.shape_n(inpList))
for shape_dace, shape_tf in zip(shapes_dace, shapes_tf):
    try:
        assert (shape_dace == shape_tf).all()
    except AssertionError:
        print(shape_dace)
        print(shape_tf)
Example no. 14
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.

try:
    import tensorflow as tf
except ImportError:
    print("WARNING: Tensorflow not found, skipping test")
    exit(0)

import numpy as np
from dace.frontend.tensorflow import TFSession
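# Minimal matrix-multiplication smoke test for the DaCe TensorFlow frontend.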

if __name__ == '__main__':
    print('DaCe Tensorflow frontend test')

    A = np.random.rand(16, 16).astype(np.float32)
    B = np.random.rand(16, 16).astype(np.float32)

    A_tf = tf.placeholder(tf.float32, shape=[16, 16])
    B_tf = tf.placeholder(tf.float32, shape=[16, 16])

    with TFSession() as sess:
        # Simple matrix multiplication
        C = sess.run(A_tf @ B_tf, feed_dict={A_tf: A, B_tf: B})

    diff = np.linalg.norm(C - (A @ B)) / (16 * 16)
    print("Difference:", diff)
    print("==== Program end ====")
    exit(0 if diff <= 1e-5 else 1)
Example no. 15
def test_fused_batch_norm():
    import numpy as np
    import tensorflow as tf
    from tensorflow.python.ops import gen_nn_ops
    from dace.frontend.tensorflow import TFSession

    num_channels = 3
    size = [8, 224, 224, num_channels]

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    inp = tf.placeholder(tf.float32, size)
    scale = tf.placeholder(tf.float32, [num_channels])
    offset = tf.placeholder(tf.float32, [num_channels])
    populationMean = tf.placeholder(tf.float32, [num_channels])
    populationVariance = tf.placeholder(tf.float32, [num_channels])
    y, mean, var, _, var_sqrt = gen_nn_ops._fused_batch_norm(inp,
                                                             scale,
                                                             offset, [], [],
                                                             epsilon=0.1,
                                                             is_training=True)
    outputs = [y, mean, var]
    test_in = np.random.uniform(size=size).astype(np.float32)
    test_scale = np.random.uniform(size=[num_channels]).astype(np.float32)
    test_offset = np.random.uniform(size=[num_channels]).astype(np.float32)

    sess_tf = tf.Session(config=config)
    sess_dace = TFSession()

    outputs_dace = sess_dace.run(
        outputs,
        feed_dict={
            inp: test_in,
            scale: test_scale,
            offset: test_offset,
        },
    )
    outputs_tf = sess_tf.run(
        outputs,
        feed_dict={
            inp: test_in,
            scale: test_scale,
            offset: test_offset,
        },
    )
Example no. 16
def test_fused_batch_norm():
    import numpy as np
    import tensorflow as tf
    from tensorflow.python.ops import gen_nn_ops
    from dace.frontend.tensorflow import TFSession
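    # Forward and gradient checks for fused batch normalization
    # (gen_nn_ops._fused_batch_norm) in training mode with epsilon=0.1.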

    num_channels = 3
    size = [8, 224, 224, num_channels]

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    inp = tf.placeholder(tf.float32, size)
    scale = tf.placeholder(tf.float32, [num_channels])
    offset = tf.placeholder(tf.float32, [num_channels])
    populationMean = tf.placeholder(tf.float32, [num_channels])
    populationVariance = tf.placeholder(tf.float32, [num_channels])
    y, mean, var, _, var_sqrt = gen_nn_ops._fused_batch_norm(inp,
                                                             scale,
                                                             offset, [], [],
                                                             epsilon=0.1,
                                                             is_training=True)
    outputs = [y, mean, var]
    test_in = np.random.uniform(size=size).astype(np.float32)
    test_scale = np.random.uniform(size=[num_channels]).astype(np.float32)
    test_offset = np.random.uniform(size=[num_channels]).astype(np.float32)

    sess_tf = tf.Session(config=config)
    sess_dace = TFSession()

    outputs_dace = sess_dace.run(
        outputs,
        feed_dict={
            inp: test_in,
            scale: test_scale,
            offset: test_offset,
        },
    )
    outputs_tf = sess_tf.run(
        outputs,
        feed_dict={
            inp: test_in,
            scale: test_scale,
            offset: test_offset,
        },
    )

    try:
        assert (tf.linalg.norm(outputs_tf[0] -
                               outputs_dace[0]).eval(session=sess_tf) < 1e-1
                and tf.linalg.norm(outputs_dace[2] -
                                   outputs_tf[2]).eval(session=sess_tf) < 1e-4
                and tf.linalg.norm(outputs_dace[1] -
                                   outputs_tf[1]).eval(session=sess_tf) < 1e-4)
    except Exception:
        print("FBN test failed")
        print(
            tf.linalg.norm(outputs_tf[0] -
                           outputs_dace[0]).eval(session=sess_tf))
        print(
            tf.linalg.norm(outputs_tf[1] -
                           outputs_dace[1]).eval(session=sess_tf))
        print(
            tf.linalg.norm(outputs_tf[2] -
                           outputs_dace[2]).eval(session=sess_tf))

    ################# FBN GRADIENT TEST ###############################
    outputGrad = tf.placeholder(tf.float32, size)
    x_grad, gamma_grad, beta_grad, _, _ = gen_nn_ops.fused_batch_norm_grad(
        outputGrad,
        inp,
        scale,
        outputs[1],
        var_sqrt,
        epsilon=0.1,
        is_training=True)
    gradients = [x_grad, gamma_grad, beta_grad]
    test_outputgrad = np.random.uniform(size=size).astype(np.float32)
    outputs_dace = sess_dace.run(
        gradients,
        feed_dict={
            inp: test_in,
            outputGrad: test_outputgrad,
            scale: test_scale,
            offset: test_offset,
        },
    )
    # TF
    x_grad, gamma_grad, beta_grad, _, _ = gen_nn_ops.fused_batch_norm_grad(
        outputGrad,
        inp,
        scale,
        outputs[1],
        tf.math.rsqrt(outputs[2] + float(0.1))
        if tf.test.is_built_with_cuda() else outputs[2],
        epsilon=0.1,
        is_training=True,
    )
    gradients = [x_grad, gamma_grad, beta_grad]
    # writer = tf.summary.FileWriter("./", sess_tf.graph)
    outputs_tf = sess_tf.run(
        gradients,
        feed_dict={
            inp: test_in,
            outputGrad: test_outputgrad,
            scale: test_scale,
            offset: test_offset,
        },
    )
    try:
        assert (tf.linalg.norm(outputs_tf[0] -
                               outputs_dace[0]).eval(session=sess_tf) < 1e-1
                and tf.linalg.norm(outputs_dace[2] -
                                   outputs_tf[2]).eval(session=sess_tf) < 10
                and tf.linalg.norm(outputs_dace[1] -
                                   outputs_tf[1]).eval(session=sess_tf) < 10)
    except Exception:
        print("FBN Gradient test failed")
        print(
            tf.linalg.norm(outputs_tf[0] -
                           outputs_dace[0]).eval(session=sess_tf))
        print(
            tf.linalg.norm(outputs_tf[1] -
                           outputs_dace[1]).eval(session=sess_tf))
        print(
            tf.linalg.norm(outputs_tf[2] -
                           outputs_dace[2]).eval(session=sess_tf))
        print(
            tf.linalg.norm(outputs_tf[2] -
                           np.sum(test_outputgrad, axis=(0, 1, 2))).eval(
                               session=sess_tf))
Example no. 17
def test_conv():
    import numpy as np
    import tensorflow as tf
    from tensorflow.python.ops import gen_nn_ops
    from dace.frontend.tensorflow import TFSession
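    # Forward conv2d plus input- and filter-gradient checks
    # (conv2d_backprop_input / conv2d_backprop_filter) against TensorFlow.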
    inp_shape = [10, 10, 10, 10]
    filter_shape = [3, 3, 10, 3]
    strides = [1, 3, 3, 1]

    inp = tf.placeholder(tf.float64, inp_shape)
    filter = tf.placeholder(tf.float64, filter_shape)
    outp = tf.nn.conv2d(inp,
                        filter,
                        strides,
                        padding="SAME",
                        data_format="NHWC")

    test_in = np.random.uniform(size=tuple(inp_shape)).astype(np.float64)
    test_filter = np.random.uniform(size=tuple(filter_shape)).astype(
        np.float64)

    sess_dace = TFSession()
    sess_tf = tf.Session()

    output_dace = sess_dace.run(outp,
                                feed_dict={
                                    inp: test_in,
                                    filter: test_filter
                                })
    output_tf = sess_tf.run(outp,
                            feed_dict={
                                inp: test_in,
                                filter: test_filter
                            })
    try:
        assert tf.norm(output_dace - output_tf).eval(session=sess_tf) < 1e-10
    except Exception:
        print(output_tf)
        print(output_dace)
        print(tf.linalg.norm(output_tf - output_dace).eval(session=sess_tf))
        raise AssertionError("Convolution test failed")
    ##### Conv backprop grad ######
    inp_shape = [10, 10, 10, 10]
    filters = [[2, 2, 10, 3]]
    strides = [[1, 3, 3, 1]]
    paddings = ["VALID"]
    for p in paddings:
        for f in filters:
            for s in strides:
                print(p, f, s)
                filter = tf.placeholder(tf.float64, f)
                outp = tf.nn.conv2d(inp,
                                    filter,
                                    s,
                                    padding=p,
                                    data_format="NHWC")
                out_backprop = tf.placeholder(tf.float64, outp.shape)
                inp_gradients = gen_nn_ops.conv2d_backprop_input(inp_shape,
                                                                 filter,
                                                                 out_backprop,
                                                                 s,
                                                                 padding=p)
                test_grads = np.random.uniform(size=outp.shape).astype(
                    np.float64)
                test_filter = np.random.uniform(size=tuple(f)).astype(
                    np.float64)

                output_tf = sess_tf.run(inp_gradients,
                                        feed_dict={
                                            filter: test_filter,
                                            out_backprop: test_grads
                                        })
                output_dace = sess_dace.run(inp_gradients,
                                            feed_dict={
                                                filter: test_filter,
                                                out_backprop: test_grads
                                            })

                try:
                    assert tf.norm(output_dace -
                                   output_tf).eval(session=sess_tf) < 1e-10
                except Exception:
                    print(p)
                    print(f)
                    print(s)
                    print(output_tf)
                    print(output_dace)
                    print(
                        tf.linalg.norm(output_tf -
                                       output_dace).eval(session=sess_tf))
                    raise AssertionError("Convolution grad test failed")

    ##### Conv filter backprop ##################
    inp_shape = [10, 10, 10, 10]
    filters = [[4, 4, 10, 3]]
    strides = [[1, 1, 1, 1]]
    paddings = ["SAME"]
    for p in paddings:
        for f in filters:
            for s in strides:
                input_placeholder = tf.placeholder(tf.float64, inp_shape)
                filter = tf.placeholder(tf.float64, f)
                outp = tf.nn.conv2d(inp,
                                    filter,
                                    s,
                                    padding=p,
                                    data_format="NHWC")
                out_backprop = tf.placeholder(tf.float64, outp.shape)
                filter_gradients = gen_nn_ops.conv2d_backprop_filter(
                    input_placeholder, f, out_backprop, s, padding=p)
                test_grads = np.random.uniform(size=outp.shape).astype(
                    np.float64)
                test_input = np.random.uniform(size=tuple(inp_shape)).astype(
                    np.float64)

                output_tf = sess_tf.run(filter_gradients,
                                        feed_dict={
                                            input_placeholder: test_input,
                                            out_backprop: test_grads
                                        })
                output_dace = sess_dace.run(filter_gradients,
                                            feed_dict={
                                                input_placeholder: test_input,
                                                out_backprop: test_grads
                                            })

                try:
                    assert tf.norm(output_dace -
                                   output_tf).eval(session=sess_tf) < 1e-10
                except Exception:
                    print(p)
                    print(f)
                    print(s)
                    print(output_tf)
                    print(output_dace)
                    print(
                        tf.linalg.norm(output_tf -
                                       output_dace).eval(session=sess_tf))
                    raise AssertionError("Convolution filter grad test failed")
Example no. 18
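    # Compare gradients of a softmax cross-entropy loss between a native
    # tf.Session and the DaCe TFSession.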
    softmax = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=label_node, logits=logits)
    loss = tf.reduce_mean(softmax, name="loss")

    # Get gradient tensors and initializer operation
    gradients = tf.gradients(loss, tf.trainable_variables())
    init = tf.global_variables_initializer()

    # Compute gradients and compare
    # Tensorflow
    with tf.Session() as sess:
        sess.run(init)
        tf_gradients = sess.run(gradients)

    # DaCe
    with TFSession(seed=SEED) as sess:
        sess.run(init)
        dace_gradients = sess.run(gradients)

    # Compare
    for tfgrad, dacegrad in zip(tf_gradients, dace_gradients):
        diff_norm = np.linalg.norm((tfgrad - dacegrad).flatten())
        print("Difference norm:", diff_norm)
        if diff_norm > 1e-4:
            print("==== Program end ====")
            print("Error: norm too large")
            exit(1)
    print("==== Program end ====")
    exit(0)
Example no. 19
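    # Build a softmax cross-entropy loss and SGD update op, then initialize
    # and compile the graph with the DaCe TFSession.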
    softmax = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                             logits=logits)
    loss = tf.reduce_mean(softmax, name="loss")
    gradients = tf.train.GradientDescentOptimizer(
        learning_rate).compute_gradients(loss)
    gradient_tensors = []
    for tup in gradients:
        gradient_tensors.append(tup[0])
    update_op = tf.train.GradientDescentOptimizer(
        learning_rate).apply_gradients(gradients)

    return logits, update_op


# DaCe
sess = TFSession(seed=SEED)
y = build_resnet(input_placeholder, label_placeholder)

# TensorFlow + XLA
#sess = tf.Session()
#[y] = xla.compile(build_resnet, inputs=[input_placeholder, label_placeholder])

init = tf.global_variables_initializer()
sess.run(init)

images, labels = random_batch(batch_size)

# Warmup run
sess_run = sess.compile(y, gpu=True)  # Set gpu=False to run on the CPU instead

start = time.time()