def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output.

  Returns:
    the gradients w.r.t. the input and the filter
  """
  # We call the gen_nn_ops backprop functions instead of nn_ops backprop
  # functions for performance reasons in Eager mode. See _Conv2DGrad.
  return [
      None,
      gen_nn_ops.conv2d_backprop_filter(
          grad,
          array_ops.shape(op.inputs[1]),
          op.inputs[2],
          dilations=op.get_attr("dilations"),
          strides=op.get_attr("strides"),
          padding=op.get_attr("padding"),
          explicit_paddings=op.get_attr("explicit_paddings"),
          use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
          data_format=op.get_attr("data_format").decode()),
      gen_nn_ops.conv2d(
          grad,
          op.inputs[1],
          dilations=op.get_attr("dilations"),
          strides=op.get_attr("strides"),
          padding=op.get_attr("padding"),
          explicit_paddings=op.get_attr("explicit_paddings"),
          use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
          data_format=op.get_attr("data_format").decode())
  ]
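# A minimal usage sketch (assumes TF 1.x graph mode; all shapes below are
# hypothetical): differentiating a conv2d_backprop_input ("deconvolution")
# op invokes _Conv2DBackpropInputGrad above, which returns None for the
# constant input_sizes argument plus gradients for the filter and the
# incoming out_backprop tensor.
import tensorflow as tf

filt = tf.placeholder(tf.float32, [3, 3, 8, 16])     # hypothetical filter
grad_in = tf.placeholder(tf.float32, [1, 5, 5, 16])  # hypothetical out_backprop
deconv = tf.nn.conv2d_backprop_input(
    input_sizes=[1, 5, 5, 8],
    filter=filt,
    out_backprop=grad_in,
    strides=[1, 1, 1, 1],
    padding="SAME")
# Taking gradients of the deconvolution triggers the registered gradient.
dfilt, dgrad_in = tf.gradients(deconv, [filt, grad_in])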
def _VerifyValues(self,
                  input_sizes=None,
                  filter_sizes=None,
                  out_backprop_sizes=None,
                  strides=None,
                  dilations=None,
                  padding=None,
                  expected=None):
  """Tests that gen_nn_ops.conv2d_backprop_filter produces the right output.

  Args:
    input_sizes: Input tensor dimensions in
      [batch, input_rows, input_cols, input_depth].
    filter_sizes: Filter tensor dimensions in
      [kernel_rows, kernel_cols, input_depth, output_depth].
    out_backprop_sizes: Output gradients tensor dimensions.
    strides: Stride.
    dilations: Dilations.
    padding: Padding type.
    expected: Expected output.
  """
  total_size_1 = np.prod(input_sizes)
  total_size_2 = np.prod(out_backprop_sizes)
  x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
  x2 = np.arange(
      1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
  strides = [1] + strides + [1]
  if dilations is not None:
    dilations = [1] + dilations + [1]

  with self.test_session() as sess:
    t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
    t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
    with self.test_scope():
      tensor = gen_nn_ops.conv2d_backprop_filter(
          input=t1,
          filter_sizes=filter_sizes,
          out_backprop=t2,
          strides=strides,
          dilations=dilations,
          padding=padding,
          data_format="NHWC")
    value = sess.run(tensor, {t1: x1, t2: x2})
    self.assertAllEqual(filter_sizes, value.shape)
    self.assertAllClose(expected, np.ravel(value), 1e-3)
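# A hedged helper sketch (not part of the test; standard TF output-size
# arithmetic) for choosing out_backprop_sizes consistent with input_sizes,
# filter_sizes, strides, dilations, and padding when calling _VerifyValues.
import math

def _out_dim(in_dim, kernel, stride, dilation, padding):
    effective_kernel = (kernel - 1) * dilation + 1
    if padding == "SAME":
        return math.ceil(in_dim / stride)
    if padding == "VALID":
        return math.ceil((in_dim - effective_kernel + 1) / stride)
    raise ValueError("unknown padding: %s" % padding)

# e.g. a 4x4 input with a 2x2 filter, stride 2, no dilation:
assert _out_dim(4, 2, 2, 1, "VALID") == 2  # -> out_backprop is 2x2
assert _out_dim(4, 2, 2, 1, "SAME") == 2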
def _Conv2DGrad(op, grad):
  """Gradient function for Conv2D."""
  dilations = op.get_attr("dilations")
  strides = op.get_attr("strides")
  padding = op.get_attr("padding")
  explicit_paddings = op.get_attr("explicit_paddings")
  use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
  data_format = op.get_attr("data_format")
  shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])

  # We call the gen_nn_ops backprop functions instead of nn_ops backprop
  # functions for performance reasons in Eager mode. gen_nn_ops functions take
  # an `explicit_paddings` parameter, but nn_ops functions do not. So if we
  # were to use the nn_ops functions, we would have to convert `padding` and
  # `explicit_paddings` into a single `padding` parameter, increasing overhead
  # in Eager mode.
  return [
      gen_nn_ops.conv2d_backprop_input(
          shape_0,
          op.inputs[1],
          grad,
          dilations=dilations,
          strides=strides,
          padding=padding,
          explicit_paddings=explicit_paddings,
          use_cudnn_on_gpu=use_cudnn_on_gpu,
          data_format=data_format),
      gen_nn_ops.conv2d_backprop_filter(
          op.inputs[0],
          shape_1,
          grad,
          dilations=dilations,
          strides=strides,
          padding=padding,
          explicit_paddings=explicit_paddings,
          use_cudnn_on_gpu=use_cudnn_on_gpu,
          data_format=data_format)
  ]
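# A minimal sketch (assumes TF 1.x graph mode; shapes are hypothetical) of
# what _Conv2DGrad computes: tf.gradients on a Conv2D op yields exactly the
# conv2d_backprop_input / conv2d_backprop_filter values returned above.
import tensorflow as tf

x = tf.placeholder(tf.float32, [1, 8, 8, 4])   # hypothetical input
w = tf.placeholder(tf.float32, [3, 3, 4, 2])   # hypothetical filter
y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
# dx comes from conv2d_backprop_input, dw from conv2d_backprop_filter.
dx, dw = tf.gradients(y, [x, w])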
def _VerifyValues(self,
                  input_sizes=None,
                  filter_sizes=None,
                  out_backprop_sizes=None,
                  strides=None,
                  dilations=None,
                  padding=None,
                  data_format_src="NHWC",
                  data_format_dst="NHWC",
                  expected=None):
  """Tests that gen_nn_ops.conv2d_backprop_filter produces the right output.

  Args:
    input_sizes: Input tensor dimensions in
      [batch, input_rows, input_cols, input_depth].
    filter_sizes: Filter tensor dimensions in
      [kernel_rows, kernel_cols, input_depth, output_depth].
    out_backprop_sizes: Output gradients tensor dimensions.
    strides: Stride.
    dilations: Dilations.
    padding: Padding type.
    data_format_src: Data format the input is in.
    data_format_dst: Data format the verification will run in, and that the
      input is converted to.
    expected: Expected output.
  """
  total_size_1 = np.prod(input_sizes)
  total_size_2 = np.prod(out_backprop_sizes)
  x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
  x2 = np.arange(
      1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
  strides = [1] + strides + [1]
  if dilations is not None:
    dilations = [1] + dilations + [1]
  expected = np.reshape(expected, filter_sizes)

  # Convert between data formats.
  x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src,
                                            data_format_dst)
  x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src,
                                            data_format_dst)
  input_sizes = test_utils.PermuteDimsBetweenDataFormats(
      input_sizes, data_format_src, data_format_dst)
  out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(
      out_backprop_sizes, data_format_src, data_format_dst)
  strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
                                                     data_format_dst)
  if dilations is not None:
    dilations = test_utils.PermuteDimsBetweenDataFormats(
        dilations, data_format_src, data_format_dst)

  with self.cached_session() as sess:
    t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
    t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
    with self.test_scope():
      tensor = gen_nn_ops.conv2d_backprop_filter(
          input=t1,
          filter_sizes=filter_sizes,
          out_backprop=t2,
          strides=strides,
          dilations=dilations,
          padding=padding,
          data_format=data_format_dst)
    value = sess.run(tensor, {t1: x1, t2: x2})
    self.assertAllEqual(filter_sizes, value.shape)
    self.assertAllClose(expected, value, 1e-3)
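# A hedged sketch of the layout conversion this variant depends on:
# test_utils.ConvertBetweenDataFormats is assumed to transpose tensors
# between NHWC and NCHW roughly like this.
import numpy as np

def _nhwc_to_nchw(x):
    # [batch, rows, cols, depth] -> [batch, depth, rows, cols]
    return np.transpose(x, [0, 3, 1, 2])

x = np.zeros([2, 5, 7, 3])
assert _nhwc_to_nchw(x).shape == (2, 3, 5, 7)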
##### Conv filter backprop ##################
inp_shape = [10, 10, 10, 10]
filters = [[i, i, 10, 3] for i in [1, 2, 3, 4, 7]]
strides = [[1, i, i, 1] for i in [1, 3, 4, 7, 8]]
paddings = ["SAME", "VALID"]
for p in paddings:
    for f in filters:
        for s in strides:
            input_placeholder = tf.placeholder(tf.float64, inp_shape)
            filter = tf.placeholder(tf.float64, f)
            outp = tf.nn.conv2d(inp, filter, s, padding=p, data_format="NHWC")
            out_backprop = tf.placeholder(tf.float64, outp.shape)
            filter_gradients = gen_nn_ops.conv2d_backprop_filter(
                input_placeholder, f, out_backprop, s, padding=p)
            test_grads = np.random.uniform(size=outp.shape).astype(np.float64)
            test_input = np.random.uniform(size=tuple(inp_shape)).astype(
                np.float64)
            output_tf = sess_tf.run(
                filter_gradients,
                feed_dict={
                    input_placeholder: test_input,
                    out_backprop: test_grads
                })
            output_dace = sess_dace.run(
                filter_gradients,
                feed_dict={
                    input_placeholder: test_input,
                    out_backprop: test_grads
                })
            # Compare the DaCe result against TensorFlow.
            try:
                assert tf.norm(
                    output_dace - output_tf).eval(session=sess_tf) < 1e-10
            except:
                print(p, f, s)
                print(tf.linalg.norm(
                    output_tf - output_dace).eval(session=sess_tf))
                raise AssertionError("Convolution filter grad test failed")
def test_conv():
    import numpy as np
    import tensorflow as tf
    from tensorflow.python.ops import gen_nn_ops
    from dace.frontend.tensorflow import TFSession

    inp_shape = [10, 10, 10, 10]
    filter_shape = [3, 3, 10, 3]
    strides = [1, 3, 3, 1]

    inp = tf.placeholder(tf.float64, inp_shape)
    filter = tf.placeholder(tf.float64, filter_shape)
    outp = tf.nn.conv2d(inp, filter, strides, padding="SAME",
                        data_format="NHWC")

    test_in = np.random.uniform(size=tuple(inp_shape)).astype(np.float64)
    test_filter = np.random.uniform(size=tuple(filter_shape)).astype(
        np.float64)

    sess_dace = TFSession()
    sess_tf = tf.Session()

    output_dace = sess_dace.run(
        outp, feed_dict={
            inp: test_in,
            filter: test_filter
        })
    output_tf = sess_tf.run(
        outp, feed_dict={
            inp: test_in,
            filter: test_filter
        })
    try:
        assert tf.norm(output_dace - output_tf).eval(session=sess_tf) < 1e-10
    except:
        print(output_tf)
        print(output_dace)
        print(tf.linalg.norm(output_tf - output_dace).eval(session=sess_tf))
        raise AssertionError("Convolution test failed")

    ##### Conv backprop grad ######
    inp_shape = [10, 10, 10, 10]
    filters = [[2, 2, 10, 3]]
    strides = [[1, 3, 3, 1]]
    paddings = ["VALID"]
    for p in paddings:
        for f in filters:
            for s in strides:
                print(p, f, s)
                filter = tf.placeholder(tf.float64, f)
                outp = tf.nn.conv2d(inp, filter, s, padding=p,
                                    data_format="NHWC")
                out_backprop = tf.placeholder(tf.float64, outp.shape)
                inp_gradients = gen_nn_ops.conv2d_backprop_input(
                    inp_shape, filter, out_backprop, s, padding=p)
                test_grads = np.random.uniform(size=outp.shape).astype(
                    np.float64)
                test_filter = np.random.uniform(size=tuple(f)).astype(
                    np.float64)
                output_tf = sess_tf.run(
                    inp_gradients,
                    feed_dict={
                        filter: test_filter,
                        out_backprop: test_grads
                    })
                output_dace = sess_dace.run(
                    inp_gradients,
                    feed_dict={
                        filter: test_filter,
                        out_backprop: test_grads
                    })
                try:
                    assert tf.norm(
                        output_dace - output_tf).eval(session=sess_tf) < 1e-10
                except:
                    print(p)
                    print(f)
                    print(s)
                    print(output_tf)
                    print(output_dace)
                    print(tf.linalg.norm(
                        output_tf - output_dace).eval(session=sess_tf))
                    raise AssertionError("Convolution grad test failed")

    ##### Conv filter backprop ##################
    inp_shape = [10, 10, 10, 10]
    filters = [[4, 4, 10, 3]]
    strides = [[1, 1, 1, 1]]
    paddings = ["SAME"]
    for p in paddings:
        for f in filters:
            for s in strides:
                input_placeholder = tf.placeholder(tf.float64, inp_shape)
                filter = tf.placeholder(tf.float64, f)
                outp = tf.nn.conv2d(inp, filter, s, padding=p,
                                    data_format="NHWC")
                out_backprop = tf.placeholder(tf.float64, outp.shape)
                filter_gradients = gen_nn_ops.conv2d_backprop_filter(
                    input_placeholder, f, out_backprop, s, padding=p)
                test_grads = np.random.uniform(size=outp.shape).astype(
                    np.float64)
                test_input = np.random.uniform(size=tuple(inp_shape)).astype(
                    np.float64)
                output_tf = sess_tf.run(
                    filter_gradients,
                    feed_dict={
                        input_placeholder: test_input,
                        out_backprop: test_grads
                    })
                output_dace = sess_dace.run(
                    filter_gradients,
                    feed_dict={
                        input_placeholder: test_input,
                        out_backprop: test_grads
                    })
                try:
                    assert tf.norm(
                        output_dace - output_tf).eval(session=sess_tf) < 1e-10
                except:
                    print(p)
                    print(f)
                    print(s)
                    print(output_tf)
                    print(output_dace)
                    print(tf.linalg.norm(
                        output_tf - output_dace).eval(session=sess_tf))
                    raise AssertionError(
                        "Convolution filter grad test failed")
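# Hypothetical entry point (not shown in the original snippet) for running
# the comparison directly.
if __name__ == "__main__":
    test_conv()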