def testInvalidBlockShape(self):
  tf_in = constant_op.constant(
      -3.5e+35, shape=[10, 20, 20], dtype=dtypes.float32)
  block_shape = constant_op.constant(-10, shape=[2], dtype=dtypes.int64)
  paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32)
  with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                              "block_shape must be positive"):
    array_ops.space_to_batch_nd(tf_in, block_shape, paddings)
def testOutputSizeOutOfBounds(self):
  tf_in = constant_op.constant(
      -3.5e+35, shape=[10, 19, 22], dtype=dtypes.float32)
  block_shape = constant_op.constant(
      1879048192, shape=[2], dtype=dtypes.int64)
  paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32)
  with self.assertRaisesRegex(
      (ValueError, errors.InvalidArgumentError),
      "Negative.* dimension size caused by overflow"):
    array_ops.space_to_batch_nd(tf_in, block_shape, paddings)
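# For contrast with the two failure cases above, a minimal sketch of a valid
# call. This uses the public TF 2.x API rather than the internal array_ops
# module used above, and the shapes are chosen purely for illustration: the
# output batch is batch * prod(block_shape), and each padded spatial
# dimension is divided by its block size.
import numpy as np
import tensorflow as tf

x = np.arange(16, dtype=np.float32).reshape([1, 4, 4, 1])
y = tf.space_to_batch_nd(x, block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
# batch: 1 * 2 * 2 = 4; spatial: (4 + 0) / 2 = 2 in each dimension.
assert y.shape == (4, 2, 2, 1)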
def _testPad(self, inputs, block_shape, paddings, outputs):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings).reshape((len(block_shape), 2))
  with self.session() as sess, self.test_scope():
    for dtype in self.float_types:
      # TODO(b/68813416): Skip bfloat16's as the input type for direct is
      # float32 and results in a mismatch, while making testDirect provide
      # the correctly typed input results in 'no fill-function for
      # data-type' error.
      if dtype == dtypes.bfloat16.as_numpy_dtype:
        continue
      if dtype == np.float16:
        actual_inputs = np.array(inputs).astype(dtype)
        actual_paddings = np.array(paddings).astype(dtype)
        expected_outputs = np.array(outputs).astype(dtype)
      else:
        actual_inputs = inputs
        actual_paddings = paddings
        expected_outputs = outputs
      placeholder = array_ops.placeholder(dtype)
      # outputs = space_to_batch(inputs)
      x_tf = array_ops.space_to_batch_nd(placeholder, block_shape,
                                         actual_paddings)
      self.assertAllEqual(
          sess.run(x_tf, {placeholder: actual_inputs}), expected_outputs)
      # inputs = batch_to_space(outputs)
      placeholder = array_ops.placeholder(dtype)
      x_tf = array_ops.batch_to_space_nd(placeholder, block_shape,
                                         actual_paddings)
      self.assertAllEqual(
          sess.run(x_tf, {placeholder: expected_outputs}), actual_inputs)
def _testPad(self, inputs, block_shape, paddings, outputs):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings).reshape((len(block_shape), 2))
  with self.test_session() as sess, self.test_scope():
    for dtype in self.float_types:
      # TODO(b/68813416): Skip bfloat16's as the input type for direct is
      # float32 and results in a mismatch, while making testDirect provide
      # the correctly typed input results in 'no fill-function for
      # data-type' error.
      if dtype == dtypes.bfloat16.as_numpy_dtype:
        continue
      if dtype == np.float16:
        actual_inputs = np.array(inputs).astype(dtype)
        actual_paddings = np.array(paddings).astype(dtype)
        expected_outputs = np.array(outputs).astype(dtype)
      else:
        actual_inputs = inputs
        actual_paddings = paddings
        expected_outputs = outputs
      placeholder = array_ops.placeholder(dtype)
      # outputs = space_to_batch(inputs)
      x_tf = array_ops.space_to_batch_nd(placeholder, block_shape,
                                         actual_paddings)
      self.assertAllEqual(
          sess.run(x_tf, {placeholder: actual_inputs}), expected_outputs)
      # inputs = batch_to_space(outputs)
      placeholder = array_ops.placeholder(dtype)
      x_tf = array_ops.batch_to_space_nd(placeholder, block_shape,
                                         actual_paddings)
      self.assertAllEqual(
          sess.run(x_tf, {placeholder: expected_outputs}), actual_inputs)
def _testPad(self, inputs, block_shape, paddings, outputs):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings).reshape((len(block_shape), 2))
  with self.test_session() as sess, self.test_scope():
    for dtype in self.float_types:
      # TODO(b/68813416): Skip bfloat16's as the input type for direct is
      # float32 and results in a mismatch, while making testDirect provide
      # the correctly typed input results in 'no fill-function for
      # data-type' error.
      if dtype == dtypes.bfloat16.as_numpy_dtype:
        continue
      # TODO(b/77694432): Half test failed on CPU, last ran on 04-06-2018.
      if dtype == np.float16 and self.device == "XLA_CPU":
        continue
      placeholder = array_ops.placeholder(dtype)
      # outputs = space_to_batch(inputs)
      x_tf = array_ops.space_to_batch_nd(placeholder, block_shape, paddings)
      self.assertAllEqual(sess.run(x_tf, {placeholder: inputs}), outputs)
      # inputs = batch_to_space(outputs)
      placeholder = array_ops.placeholder(dtype)
      x_tf = array_ops.batch_to_space_nd(placeholder, block_shape, paddings)
      self.assertAllEqual(sess.run(x_tf, {placeholder: outputs}), inputs)
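# The _testPad variants above all exercise the same round-trip property. A
# standalone sketch of it, assuming TF 2.x eager execution with shapes
# chosen for illustration: batch_to_space with the same block_shape and the
# paddings passed as crops inverts space_to_batch exactly.
import numpy as np
import tensorflow as tf

x = np.random.uniform(size=[2, 6, 6, 3]).astype(np.float32)
block_shape = [2, 2]
paddings = [[1, 1], [0, 0]]
y = tf.space_to_batch(x, block_shape, paddings)  # shape [8, 4, 3, 3]
x_back = tf.batch_to_space(y, block_shape, crops=paddings)
# The op is a pure rearrangement, so recovery is bit-exact.
np.testing.assert_array_equal(x_back.numpy(), x)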
def _testStaticShape(self, input_shape, block_shape, paddings, error):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings)
  # Try with sizes known at graph construction time.
  with self.assertRaises(error):
    _ = array_ops.space_to_batch_nd(
        np.zeros(input_shape, np.float32), block_shape, paddings)
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype='int32'):
  data = np.random.uniform(0, 5, size=input_shape).astype(dtype)

  with tf.Graph().as_default():
    in_data = array_ops.placeholder(shape=input_shape, dtype=dtype)
    out = array_ops.space_to_batch_nd(in_data, block_shape, paddings)

    compare_tflite_with_tvm(data, 'Placeholder:0', [in_data], [out])
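# Hypothetical invocations of the TVM/TFLite helper above; the shape,
# block, and padding values here are illustrative, not taken from the
# original test suite.
_test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2],
                        paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2],
                        paddings=[[1, 1], [1, 1]], dtype='int64')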
def _testPad(self, inputs, block_shape, paddings, outputs):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings).reshape((len(block_shape), 2))
  with self.test_session() as sess, self.test_scope():
    for dtype in self.float_types:
      placeholder = array_ops.placeholder(dtype)
      # outputs = space_to_batch(inputs)
      x_tf = array_ops.space_to_batch_nd(placeholder, block_shape, paddings)
      self.assertAllEqual(sess.run(x_tf, {placeholder: inputs}), outputs)
      # inputs = batch_to_space(outputs)
      placeholder = array_ops.placeholder(dtype)
      x_tf = array_ops.batch_to_space_nd(placeholder, block_shape, paddings)
      self.assertAllEqual(sess.run(x_tf, {placeholder: outputs}), inputs)
def _testPad(self, inputs, block_shape, paddings, outputs):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings).reshape((len(block_shape), 2))
  for use_gpu in [False, True]:
    with self.cached_session(use_gpu=use_gpu):
      # outputs = space_to_batch(inputs)
      x_tf = array_ops.space_to_batch_nd(
          math_ops.cast(inputs, dtypes.float32), block_shape, paddings)
      self.assertAllEqual(x_tf, outputs)
      # inputs = batch_to_space(outputs)
      x_tf = array_ops.batch_to_space_nd(
          math_ops.cast(outputs, dtypes.float32), block_shape, paddings)
      self.assertAllEqual(x_tf, inputs)
def _testPad(self, inputs, block_shape, paddings, outputs):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings).reshape((len(block_shape), 2))
  for use_gpu in [False, True]:
    with self.test_session(use_gpu=use_gpu):
      # outputs = space_to_batch(inputs)
      x_tf = array_ops.space_to_batch_nd(
          math_ops.to_float(inputs), block_shape, paddings)
      self.assertAllEqual(x_tf.eval(), outputs)
      # inputs = batch_to_space(outputs)
      x_tf = array_ops.batch_to_space_nd(
          math_ops.to_float(outputs), block_shape, paddings)
      self.assertAllEqual(x_tf.eval(), inputs)
def testInvalidBlockShape(self):
  with self.assertRaisesRegex(ValueError, "block_shape must be positive"):
    with self.session() as sess, self.test_scope():
      tf_in = constant_op.constant(
          -3.5e+35, shape=[10, 20, 20], dtype=dtypes.float32)
      block_shape = constant_op.constant(-10, shape=[2], dtype=dtypes.int64)
      paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32)
      sess.run(array_ops.space_to_batch_nd(tf_in, block_shape, paddings))
def testOutputSizeOutOfBounds(self):
  with self.assertRaisesRegex(
      ValueError, "Negative.* dimension size caused by overflow"):
    with self.session() as sess, self.test_scope():
      tf_in = constant_op.constant(
          -3.5e+35, shape=[10, 19, 22], dtype=dtypes.float32)
      block_shape = constant_op.constant(
          1879048192, shape=[2], dtype=dtypes.int64)
      paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32)
      sess.run(array_ops.space_to_batch_nd(tf_in, block_shape, paddings))
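# Worked arithmetic behind the overflow test above: the output batch size
# would be batch * prod(block_shape) = 10 * 1879048192 * 1879048192, roughly
# 3.5e19, which exceeds the int64 maximum of 2**63 - 1 (about 9.2e18), so
# the computed dimension wraps negative and the op reports the
# "Negative ... dimension size caused by overflow" error.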
def _checkGrad(self, x, block_shape, paddings):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings).reshape((len(block_shape), 2))
  with self.cached_session():
    tf_x = ops.convert_to_tensor(x)
    tf_y = array_ops.space_to_batch_nd(tf_x, block_shape, paddings)
    epsilon = 1e-5
    (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
        tf_x,
        x.shape,
        tf_y,
        tf_y.get_shape().as_list(),
        x_init_value=x,
        delta=epsilon)
    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
def _checkGrad(self, x, block_shape, paddings):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings).reshape((len(block_shape), 2))
  with self.test_session():
    tf_x = ops.convert_to_tensor(x)
    tf_y = array_ops.space_to_batch_nd(tf_x, block_shape, paddings)
    epsilon = 1e-5
    (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
        tf_x,
        x.shape,
        tf_y,
        tf_y.get_shape().as_list(),
        x_init_value=x,
        delta=epsilon)
    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
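# A sketch of the property these gradient checks exercise, assuming TF 2.x
# eager execution with illustrative shapes. With zero paddings,
# space_to_batch is a pure permutation of the input, so for
# loss = sum(y**2) the gradient with respect to x must be exactly 2*x,
# merely rearranged back through the gradient's implicit batch_to_space.
import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(1, 4, 4, 1).astype(np.float32))
with tf.GradientTape() as tape:
  tape.watch(x)  # x is a constant, so it must be watched explicitly.
  y = tf.space_to_batch(x, block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
  loss = tf.reduce_sum(y * y)
grad = tape.gradient(loss, x)
np.testing.assert_allclose(grad.numpy(), 2.0 * x.numpy(), rtol=1e-6)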
def _with_space_to_batch_call(self, inp, filter):  # pylint: disable=redefined-builtin
  """Call functionality for with_space_to_batch."""
  # Handle input whose shape is unknown during graph creation.
  input_spatial_shape = None
  input_shape = self.input_shape
  spatial_dims = self.spatial_dims
  if input_shape.ndims is not None:
    input_shape_list = input_shape.as_list()
    input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
  if input_spatial_shape is None or None in input_spatial_shape:
    input_shape_tensor = array_ops.shape(inp)
    input_spatial_shape = array_ops.stack(
        [input_shape_tensor[i] for i in spatial_dims])

  base_paddings = self.base_paddings
  if base_paddings is None:
    # base_paddings could not be computed at build time since static filter
    # shape was not fully defined.
    filter_shape = array_ops.shape(filter)
    base_paddings = _with_space_to_batch_base_paddings(
        filter_shape, self.num_spatial_dims, self.rate_or_const_rate)

  paddings, crops = array_ops.required_space_to_batch_paddings(
      input_shape=input_spatial_shape,
      base_paddings=base_paddings,
      block_shape=self.dilation_rate)

  dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
                                              spatial_dims)
  paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
  crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
  input_converted = array_ops.space_to_batch_nd(
      input=inp, block_shape=dilation_rate, paddings=paddings)

  result = self.op(input_converted, filter)

  result_converted = array_ops.batch_to_space_nd(
      input=result, block_shape=dilation_rate, crops=crops)

  # Recover channel information for output shape if channels are not last.
  if self.data_format is not None and self.data_format.startswith("NC"):
    if not result_converted.shape[1].value and filter is not None:
      output_shape = result_converted.shape.as_list()
      output_shape[1] = filter.shape[-1]
      result_converted.set_shape(output_shape)

  return result_converted
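# A minimal sketch of the rewrite _with_space_to_batch_call performs,
# assuming TF 2.x eager execution with shapes and rate chosen so no extra
# padding is needed: a dilated (atrous) convolution equals
# batch_to_space(conv(space_to_batch(input))).
import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(1, 8, 8, 1).astype(np.float32))
filt = tf.constant(np.random.rand(3, 3, 1, 1).astype(np.float32))
zero = [[0, 0], [0, 0]]

# Direct dilated convolution, rate 2, VALID padding.
y_direct = tf.nn.conv2d(x, filt, strides=1, padding="VALID", dilations=2)

# Same computation via the space-to-batch trick: the 8x8 input is split
# into 2*2 subsampled 4x4 grids stacked on the batch axis, an ordinary
# VALID conv runs on each grid, and batch_to_space re-interleaves the
# results.
x_s2b = tf.space_to_batch(x, block_shape=[2, 2], paddings=zero)
y_s2b = tf.nn.conv2d(x_s2b, filt, strides=1, padding="VALID")
y_trick = tf.batch_to_space(y_s2b, block_shape=[2, 2], crops=zero)

np.testing.assert_allclose(y_trick.numpy(), y_direct.numpy(),
                           rtol=1e-5, atol=1e-5)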
def _testDynamicShape(self, input_shape, block_shape, paddings):
  block_shape = np.array(block_shape)
  paddings = np.array(paddings)
  # Try with sizes unknown at graph construction time.
  input_placeholder = array_ops.placeholder(dtypes.float32)
  block_shape_placeholder = array_ops.placeholder(
      dtypes.int32, shape=block_shape.shape)
  paddings_placeholder = array_ops.placeholder(dtypes.int32)
  t = array_ops.space_to_batch_nd(input_placeholder, block_shape_placeholder,
                                  paddings_placeholder)

  with self.assertRaises(ValueError):
    _ = t.eval({
        input_placeholder: np.zeros(input_shape, np.float32),
        block_shape_placeholder: block_shape,
        paddings_placeholder: paddings
    })
def testUnknown(self):
  # Verify that input shape and paddings shape can be unknown.
  _ = array_ops.space_to_batch_nd(
      array_ops.placeholder(dtypes.float32),
      array_ops.placeholder(dtypes.int32, shape=(2,)),
      array_ops.placeholder(dtypes.int32))

  # Only number of input dimensions is known.
  t = array_ops.space_to_batch_nd(
      array_ops.placeholder(dtypes.float32, shape=(None, None, None, None)),
      array_ops.placeholder(dtypes.int32, shape=(2,)),
      array_ops.placeholder(dtypes.int32))
  self.assertEqual(4, t.get_shape().ndims)

  # Dimensions are partially known.
  t = array_ops.space_to_batch_nd(
      array_ops.placeholder(dtypes.float32, shape=(None, None, None, 2)),
      array_ops.placeholder(dtypes.int32, shape=(2,)),
      array_ops.placeholder(dtypes.int32))
  self.assertEqual([None, None, None, 2], t.get_shape().as_list())

  # Dimensions are partially known.
  t = array_ops.space_to_batch_nd(
      array_ops.placeholder(dtypes.float32, shape=(3, None, None, 2)),
      [2, 3],
      array_ops.placeholder(dtypes.int32))
  self.assertEqual([3 * 2 * 3, None, None, 2], t.get_shape().as_list())

  # Dimensions are partially known.
  t = array_ops.space_to_batch_nd(
      array_ops.placeholder(dtypes.float32, shape=(3, None, 2, 2)),
      [2, 3], [[1, 1], [0, 1]])
  self.assertEqual([3 * 2 * 3, None, 1, 2], t.get_shape().as_list())

  # Dimensions are fully known.
  t = array_ops.space_to_batch_nd(
      array_ops.placeholder(dtypes.float32, shape=(3, 2, 3, 2)),
      [2, 3], [[1, 1], [0, 0]])
  self.assertEqual([3 * 2 * 3, 2, 1, 2], t.get_shape().as_list())
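# Worked shape arithmetic for the fully known case above:
# batch = 3 * (2 * 3) = 18; height = (2 + 1 + 1) / 2 = 2;
# width = (3 + 0 + 0) / 3 = 1; channels pass through unchanged,
# giving [18, 2, 1, 2].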
def _BatchToSpaceNDGrad(op, grad):
  # Its gradient is the opposite op: SpaceToBatchND.
  return [
      array_ops.space_to_batch_nd(grad, op.inputs[1], op.inputs[2]), None,
      None
  ]
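# The symmetry runs both ways. A sketch of the counterpart registration
# (recalled from upstream TensorFlow, not taken from this file): each op is
# a padded-or-cropped permutation, and the adjoint of a permutation is its
# inverse, so SpaceToBatchND's gradient is BatchToSpaceND.
def _SpaceToBatchNDGrad(op, grad):
  # Its gradient is the opposite op: BatchToSpaceND.
  return [
      array_ops.batch_to_space_nd(grad, op.inputs[1], op.inputs[2]), None,
      None
  ]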
def loop_fn(i):
  x1 = array_ops.gather(x, i)
  return array_ops.space_to_batch_nd(x1, block_shapes, paddings)
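# A hypothetical driver for the pfor loop_fn above; the names x,
# block_shapes, and paddings, and all shapes, are assumed for illustration.
# pfor vectorizes the per-index calls across the first axis of x.
import tensorflow as tf
from tensorflow.python.ops.parallel_for import control_flow_ops

x = tf.random.uniform([4, 1, 4, 4, 1])
block_shapes = [2, 2]
paddings = [[0, 0], [0, 0]]

def loop_fn(i):
  x1 = tf.gather(x, i)
  return tf.space_to_batch_nd(x1, block_shapes, paddings)

# Each per-index result has shape [4, 2, 2, 1]; pfor stacks them into
# a tensor of shape [4, 4, 2, 2, 1].
out = control_flow_ops.pfor(loop_fn, 4)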