def testNHWCToNCHW2D(self):
  """Checks that a 2-D vector is permuted from NHWC to NCHW row order."""
  input_rows = [[7, 4], [9, 3], [4, 5], [5, 1]]
  permuted = nn_ops.data_format_vec_permute(constant_op.constant(input_rows))
  with self.test_session(use_gpu=test_lib.is_gpu_available()) as session:
    actual = session.run(permuted)
    # N and C rows move; H and W follow in N,C,H,W order.
    self.assertAllEqual(actual, [[7, 4], [5, 1], [9, 3], [4, 5]])
def testHWNCToNHWC2D(self):
  """Checks that a 2-D vector is permuted from HWNC to NHWC row order."""
  source = constant_op.constant([[7, 4], [9, 3], [4, 5], [5, 1]])
  permuted = nn_ops.data_format_vec_permute(
      source, src_format="HWNC", dst_format="NHWC")
  with self.session(use_gpu=test_lib.is_gpu_available()) as session:
    actual = session.run(permuted)
    # The N row moves to the front, followed by H, W, then C.
    self.assertAllEqual(actual, [[4, 5], [7, 4], [9, 3], [5, 1]])
def testNCHWToNHWC(self):
  """Checks that a 1-D vector is permuted from NCHW to NHWC order."""
  source = constant_op.constant([7, 4, 9, 3])
  permuted = nn_ops.data_format_vec_permute(
      source, src_format="NCHW", dst_format="NHWC")
  with self.test_session(use_gpu=test_lib.is_gpu_available()) as session:
    actual = session.run(permuted)
    # C (=4) moves from position 1 to the end.
    self.assertAllEqual(actual, [7, 9, 3, 4])
def testNCHWToNHWC2D(self):
  """Checks that a 2-D vector is permuted from NCHW to NHWC row order."""
  input_rows = [[7, 4], [9, 3], [4, 5], [5, 1]]
  permuted = nn_ops.data_format_vec_permute(
      constant_op.constant(input_rows), src_format="NCHW", dst_format="NHWC")
  with test_util.use_gpu():
    actual = self.evaluate(permuted)
    # The C row moves from position 1 to the end.
    self.assertAllEqual(actual, [[7, 4], [4, 5], [5, 1], [9, 3]])
def testNHWCToNCHW(self):
  """Checks the default NHWC-to-NCHW permutation of a 1-D vector."""
  source = constant_op.constant([7, 4, 9, 3])
  # Default src/dst formats are NHWC -> NCHW.
  permuted = nn_ops.data_format_vec_permute(source)
  with self.session(use_gpu=test_lib.is_gpu_available()) as session:
    actual = session.run(permuted)
    self.assertAllEqual(actual, [7, 3, 4, 9])
def testNHWCToNCHW(self):
  """Checks the default NHWC-to-NCHW permutation of a 1-D vector."""
  permuted = nn_ops.data_format_vec_permute(
      constant_op.constant([7, 4, 9, 3]))
  with test_util.use_gpu():
    # C (=3) moves from the last position to position 1.
    self.assertAllEqual(self.evaluate(permuted), [7, 3, 4, 9])
def testHWNCToNHWC(self):
  """Checks that a 1-D vector is permuted from HWNC to NHWC order."""
  source = constant_op.constant([7, 4, 9, 3])
  permuted = nn_ops.data_format_vec_permute(
      source, src_format="HWNC", dst_format="NHWC")
  with test_util.use_gpu():
    actual = self.evaluate(permuted)
    # N (=9) moves to the front; C stays last.
    self.assertAllEqual(actual, [9, 7, 4, 3])
def testHWNCToNHWC(self):
  """Checks that a 1-D vector is permuted from HWNC to NHWC order."""
  permuted = nn_ops.data_format_vec_permute(
      constant_op.constant([7, 4, 9, 3]),
      src_format="HWNC",
      dst_format="NHWC")
  with test_util.use_gpu():
    self.assertAllEqual(self.evaluate(permuted), [9, 7, 4, 3])
def testNCHWToNHWC2D(self):
  """Checks that a 2-D vector is permuted from NCHW to NHWC row order."""
  source = constant_op.constant([[7, 4], [9, 3], [4, 5], [5, 1]])
  permuted = nn_ops.data_format_vec_permute(
      source, src_format="NCHW", dst_format="NHWC")
  with test_util.use_gpu():
    self.assertAllEqual(
        self.evaluate(permuted), [[7, 4], [4, 5], [5, 1], [9, 3]])
def testNHWCToNCHW(self):
  """Checks the default NHWC-to-NCHW permutation of a 1-D vector."""
  source = constant_op.constant([7, 4, 9, 3])
  # No formats given: the op defaults to NHWC -> NCHW.
  permuted = nn_ops.data_format_vec_permute(source)
  with test_util.use_gpu():
    actual = self.evaluate(permuted)
    self.assertAllEqual(actual, [7, 3, 4, 9])
def test(self):
  """Checks the default NHWC-to-NCHW permutation of a 1-D vector.

  NOTE(review): the method name is non-descriptive; kept as-is since renaming
  would change the externally discovered test name.
  """
  permuted = nn_ops.data_format_vec_permute(
      constant_op.constant([7, 4, 9, 3]))
  with self.test_session(use_gpu=test_lib.is_gpu_available()) as session:
    actual = session.run(permuted)
    self.assertAllEqual(actual, [7, 3, 4, 9])
def testHWNCToNHWC2D(self):
  """Checks that a 2-D vector is permuted from HWNC to NHWC row order."""
  input_rows = [[7, 4], [9, 3], [4, 5], [5, 1]]
  permuted = nn_ops.data_format_vec_permute(
      constant_op.constant(input_rows),
      src_format="HWNC",
      dst_format="NHWC")
  with self.session(use_gpu=test_lib.is_gpu_available()) as session:
    self.assertAllEqual(
        session.run(permuted), [[4, 5], [7, 4], [9, 3], [5, 1]])
def _runPermuteAndCompare(self, x, src_format, dst_format, expected):
  """Runs data_format_vec_permute on `x` and asserts the result.

  Args:
    x: Numpy-like array fed through a placeholder.
    src_format: Source data-format string (e.g. "NHWC").
    dst_format: Destination data-format string (e.g. "NCHW").
    expected: Expected permuted value to compare against.
  """
  with self.cached_session() as session, self.test_scope():
    ph = array_ops.placeholder(dtypes.as_dtype(x.dtype), x.shape)
    permuted = nn_ops.data_format_vec_permute(
        ph, src_format=src_format, dst_format=dst_format)
    actual = session.run(permuted, {ph: x})
    self.assertAllEqual(actual, expected)
def run(self, x):
  """Builds a graph whose shape-permute op forces TF-TRT to split engines.

  Args:
    x: Input tensor.

  Returns:
    An identity tensor named "output" holding the doubled, permuted shape.
  """
  incremented = x + 1
  incremented_shape = array_ops.shape(incremented)
  # Add an OP that is not supported by TF-TRT. This allows TF-TRT to build
  # two engines. The first engine produces an int32 output and the second
  # engines has an int32 input and an int32 output.
  permuted = nn_ops.data_format_vec_permute(
      incremented_shape, src_format="NHWC", dst_format="NCHW")
  doubled = permuted * 2
  return array_ops.identity(doubled, name="output")