def testSimpleAndRandomInputs(self):
  if np.__version__ == "1.13.0":
    self.skipTest("numpy 1.13.0 bug")

  sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)

  with test_util.force_cpu():
    self._compare_all(sp_t, None, ndims=2)
    self._compare_all(sp_t, 0, ndims=2)
    self._compare_all(sp_t, [1], ndims=2)
    self._compare_all(sp_t, [0, 1], ndims=2)
    self._compare_all(sp_t, [1, 0], ndims=2)
    self._compare_all(sp_t, [-1], ndims=2)
    self._compare_all(sp_t, [1, -2], ndims=2)

  np.random.seed(1618)
  test_dims = [(1618, 1, 11, 7, 1), (1,), (1, 1, 1)]
  with test_util.force_cpu():
    for dims in test_dims:
      sp_t, unused_nnz = _sparsify(np.random.randn(*dims))
      # reduce all using None
      self._compare_all(sp_t, None, ndims=len(dims))
      # reduce random axes from 1D to N-D
      for d in range(1, len(dims) + 1):
        axes = np.random.choice(len(dims), size=d, replace=False).tolist()
        self._compare_all(sp_t, axes, ndims=len(dims))

def testPowNegativeExponentCpu(self):
  for dtype in [np.int32, np.int64]:
    with test_util.force_cpu():
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          "Integers to negative integer powers are not allowed"):
        x = np.array([5, 2]).astype(dtype)
        y = np.array([-2, 3]).astype(dtype)
        self.evaluate(math_ops.pow(x, y))

    with test_util.force_cpu():
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          "Integers to negative integer powers are not allowed"):
        x = np.array([5, 2]).astype(dtype)
        y = np.array([2, -3]).astype(dtype)
        self.evaluate(math_ops.pow(x, y))

    with test_util.force_cpu():
      with self.assertRaisesRegex(
          errors_impl.InvalidArgumentError,
          "Integers to negative integer powers are not allowed"):
        x = np.array([5, 2]).astype(dtype)
        y = -3
        self.evaluate(math_ops.pow(x, y))

def testHigherRanks(self):
  # For the first shape:
  # First batch:
  # [?   e.]
  # [1.  ? ]
  # Second batch:
  # [e   ? ]
  # [e   e ]
  #
  # The softmax results should be:
  # [?   1.]     [1    ?]
  # [1.  ? ] and [.5  .5]
  # where ? means implicitly zero.
  #
  # The second shape: same input data, but with a higher-rank shape.
  shapes = [[2, 2, 2], [2, 1, 2, 2]]
  for shape in shapes:
    values = np.asarray(
        [0., np.e, 1., 0., np.e, 0., np.e, np.e]).reshape(shape)
    sp_t, unused_nnz = _sparsify(values, thresh=1e-2)
    expected_values = [1., 1., 1., .5, .5]

    with test_util.force_cpu():
      result = sparse_ops.sparse_softmax(sp_t)

      self.assertAllEqual(expected_values, result.values)
      self.assertAllEqual(sp_t.indices, result.indices)
      self.assertAllEqual(shape, result.dense_shape)

def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
  np_ans = np_func(x, y)
  with test_util.force_cpu():
    inx = ops.convert_to_tensor(x)
    iny = ops.convert_to_tensor(y)
    out = tf_func(inx, iny)
    tf_cpu = self.evaluate(out)
    # Test that the op takes precedence over numpy operators.
    np_left = self.evaluate(tf_func(x, iny))
    np_right = self.evaluate(tf_func(inx, y))

    if also_compare_variables:
      var_x = variables.Variable(x)
      var_y = variables.Variable(y)
      self.evaluate(variables.global_variables_initializer())
      print(type(x), type(y), type(var_x), type(var_y))
      print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
      np_var_left = self.evaluate(tf_func(x, var_y))
      np_var_right = self.evaluate(tf_func(var_x, y))

  if np_ans.dtype != np.object_:
    self.assertAllClose(np_ans, tf_cpu)
    self.assertAllClose(np_ans, np_left)
    self.assertAllClose(np_ans, np_right)
    if also_compare_variables:
      self.assertAllClose(np_ans, np_var_left)
      self.assertAllClose(np_ans, np_var_right)
  self.assertShapeEqual(np_ans, out)

def testBackward(self):
  with self.session(), test_util.force_cpu():
    for logits_dtype in [np.float16, np.float32, np.float64]:
      for labels_dtype in [np.int32, np.int64]:
        labels, logits = self._generateInputs(
            labels_dtype, logits_dtype, seed=456)
        output_shape = labels.shape[0]

        def gradients(seed=789):
          np.random.seed(seed)
          upstream_gradients = self._randomFloats(output_shape, logits_dtype)
          with backprop.GradientTape(persistent=True) as tape:
            tape.watch(logits)
            op_output = nn_ops.sparse_softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=logits)
            gradient_injector_output = op_output * upstream_gradients
          return tape.gradient(gradient_injector_output, logits)

        for trial in range(5):
          seed = 456 + trial
          result_a = gradients(seed=seed)
          result_b = gradients(seed=seed)
          self.assertAllEqual(result_a, result_b)

def testCwiseDivAndMul(self):
  np.random.seed(1618)
  sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
  dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
  with test_util.force_cpu():
    for dtype in [np.float32, np.float64, np.int32, np.int64]:
      for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
        sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
        dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
        sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
        sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t)
        dense_t = constant_op.constant(dense_vals_np)

        self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
        # Check commutative.
        self._check(sp_t * dense_t, sp_t_densified * dense_vals_np, sp_t)
        self._check(dense_t * sp_t, sp_t_densified * dense_vals_np, sp_t)

        if dtype in [np.int32, np.int64]:
          res = sp_t / dense_t  # should invoke "__truediv__"
          self.assertEqual(res.values.dtype, np.float64)

def testSmallValuesShouldVanish(self):
  with test_util.force_cpu():
    sp_a = self._SparseTensor_3x3()
    sp_b = self._SparseTensor_3x3_v2()

    # sum:
    # [       2]
    # [.1      ]
    # [ 6   -.2]

    # two values should vanish: |.1| < .21, and |-.2| < .21
    sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.21)
    sum_out = self.evaluate(sp_sum)

    self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
    self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0]])
    self.assertAllEqual(sum_out.values, [2, 6])
    self.assertAllEqual(sum_out.dense_shape, [3, 3])

    # only .1 vanishes
    sp_sum = sparse_ops.sparse_add(sp_a, sp_b, thresh=0.11)
    sum_out = self.evaluate(sp_sum)
    self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
    self.assertAllEqual(sum_out.indices, [[0, 1], [2, 0], [2, 1]])
    self.assertAllClose(sum_out.values, [2, 6, -.2])
    self.assertAllEqual(sum_out.dense_shape, [3, 3])

def _testReproducibleBackprop(self, test_image_not_boxes):
  with test_util.force_cpu():
    for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
      params = self._genParams(dtype)
      image, boxes, box_indices, crop_size, injected_gradients = params

      with backprop.GradientTape(persistent=True) as tape:
        tape.watch([image, boxes])
        output = image_ops.crop_and_resize_v2(
            image, boxes, box_indices, crop_size, method='bilinear')
        upstream = output * injected_gradients

      image_gradients_a, boxes_gradients_a = tape.gradient(
          upstream, [image, boxes])
      for _ in range(5):
        image_gradients_b, boxes_gradients_b = tape.gradient(
            upstream, [image, boxes])
        if test_image_not_boxes:
          self.assertAllEqual(image_gradients_a, image_gradients_b)
        else:
          self.assertAllEqual(boxes_gradients_a, boxes_gradients_b)

def testBadIndicesCPU(self):
  with test_util.force_cpu():
    params = [[0, 1, 2], [3, 4, 5]]
    with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
      self.evaluate(array_ops.gather(params, [[7]], axis=0))
    with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
      self.evaluate(array_ops.gather(params, [[7]], axis=1))

def testGradientsExplicit(self):
  sp_input = self._SparseTensor_4x6()
  # SparseSliceGrad does not currently have a GPU kernel.
  with test_util.force_cpu():
    start, size = [0, 0], [4, 1]
    sp_output = sparse_ops.sparse_slice(sp_input, start, size)
    input_grad_vals = sparse_ops.sparse_slice_grad(
        sp_output.values, sp_input.indices, start, sp_output.indices)
    self.assertAllEqual(input_grad_vals,
                        [0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 30, 0, 0, 0])

    start, size = [0, 1], [4, 1]
    sp_output = sparse_ops.sparse_slice(sp_input, start, size)
    input_grad_vals = sparse_ops.sparse_slice_grad(
        sp_output.values, sp_input.indices, start, sp_output.indices)
    self.assertAllEqual(input_grad_vals,
                        [0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    start, size = [1, 3], [3, 1]
    sp_output = sparse_ops.sparse_slice(sp_input, start, size)
    input_grad_vals = sparse_ops.sparse_slice_grad(
        sp_output.values, sp_input.indices, start, sp_output.indices)
    self.assertAllEqual(input_grad_vals,
                        [0, 0, 0, 0, 0, 13, 0, 0, 23, 0, 0, 0, 33, 0])

    sp_input = self._SparseTensor_4x6_empty()
    start, size = [0, 0], [4, 1]
    sp_output = sparse_ops.sparse_slice(sp_input, start, size)
    input_grad_vals = sparse_ops.sparse_slice_grad(
        sp_output.values, sp_input.indices, start, sp_output.indices)
    self.assertAllEqual(input_grad_vals, [])

def testInvalidRank(self):
  with test_util.force_cpu():
    sp_input = self._SparseTensor_2x5x6()
    new_shape = np.array([3, 7], dtype=np.int64)

    with self.assertRaises(ValueError):
      sparse_ops.sparse_reset_shape(sp_input, new_shape)

def testInt64AndFloat64Shape(self):
  vocab_size = [50, 30]
  with test_util.force_cpu():
    indices, values = self._SparseTensor_3x50(np.int64, np.float64)
    sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)

    output = self.evaluate(sp_output)
    self._AssertResultsSorted(output, vocab_size)

def testInt64AndFloat32(self):
  vocab_size = 50
  with test_util.force_cpu():
    indices, values = self._SparseTensor_3x50(np.int64, np.float32)
    sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)

    output = self.evaluate(sp_output)
    self._AssertResultsSorted(output, vocab_size)

def compareToTranspose(self, batch_size, out_height, out_width, in_channels,
                       block_size, data_format, data_type, use_gpu):
  in_height = out_height * block_size
  in_width = out_width * block_size
  nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
  nchw_input_shape = [batch_size, in_channels, in_height, in_width]
  total_size = np.prod(nhwc_input_shape)

  # Construct the input tensor in data_type and NHWC.
  # force_cpu is needed because quantize_v2 runs on only CPU.
  with test_util.force_cpu():
    if data_type == dtypes.qint8:
      # Initialize the input tensor with qint8 values that circle -127..127.
      x = [((f + 128) % 255) - 127 for f in range(total_size)]
      t = constant_op.constant(
          x, shape=nhwc_input_shape, dtype=dtypes.float32)
      t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
    else:
      assert data_type == dtypes.float32
      # Initialize the input tensor with ascending whole numbers as floats.
      x = [f * 1.0 for f in range(total_size)]
      shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
      t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)

  with test_util.device(use_gpu):
    if data_format == "NCHW_VECT_C":
      assert data_type == dtypes.qint8

      # Convert to int8, then NHWCToNCHW_VECT_C, and then back to qint8.
      actual = array_ops.bitcast(t, dtypes.int8)
      actual = test_util.NHWCToNCHW_VECT_C(actual)
      actual = array_ops.bitcast(actual, dtypes.qint8)
      actual = array_ops.space_to_depth(
          actual, block_size, data_format=data_format)
      actual = array_ops.bitcast(actual, dtypes.int8)
      actual = test_util.NCHW_VECT_CToNHWC(actual)
      actual = array_ops.bitcast(actual, dtypes.qint8)

      expected = array_ops.bitcast(t, dtypes.int8)
      expected = math_ops.cast(expected, dtypes.float32)
      expected = self.spaceToDepthUsingTranspose(expected, block_size, "NHWC")
      expected = math_ops.cast(expected, dtypes.int8)
      expected = array_ops.bitcast(expected, dtypes.qint8)
    else:
      # Initialize the input tensor with ascending whole numbers as floats.
      actual = array_ops.space_to_depth(
          t, block_size, data_format=data_format)
      expected = self.spaceToDepthUsingTranspose(t, block_size, data_format)

    actual_vals, expected_vals = self.evaluate([actual, expected])
    self.assertTrue(np.array_equal(actual_vals, expected_vals))

def testInt64AndFloat32NonCanonicalOrder(self):
  vocab_size = 50
  with test_util.force_cpu():
    indices, values = self._SparseTensor_3x50(np.int64, np.float32)
    sp_output = sparse_ops.sparse_merge(
        indices, values, vocab_size, already_sorted=True)

    output = self.evaluate(sp_output)
    self._AssertResultsNotSorted(output, vocab_size)

def testStringComparison(self):
  x = np.array([["abc", "bh"], ["c", ""]])
  y = np.array([["abc", "bh"], ["def", "hi"]])
  with test_util.force_cpu():
    cmp_eq = math_ops.equal(x, y)
    cmp_not_eq = math_ops.not_equal(x, y)

    values = self.evaluate([cmp_eq, cmp_not_eq])

    self.assertAllEqual([[True, True], [False, False]], values[0])
    self.assertAllEqual([[False, False], [True, True]], values[1])

def testTightBoundingBoxEmpty(self):
  with test_util.force_cpu():
    sp_input = self._SparseTensor_2x5x6_Empty()
    sp_output = sparse_ops.sparse_reset_shape(sp_input)

    output = self.evaluate(sp_output)

    self.assertAllEqual(output.indices.shape, [0, 3])
    self.assertAllEqual(output.values.shape, [0])
    self.assertAllEqual(output.dense_shape, [0, 0, 0])

def testForward(self):
  with self.session(), test_util.force_cpu():
    for dtype in [np.float16, np.float32, np.float64]:
      for trial in range(5):
        seed = 123 + trial
        labels, logits = self._generateInputs(dtype, seed=seed)
        result_a = nn_ops.softmax_cross_entropy_with_logits_v2(
            labels=labels, logits=logits)
        result_b = nn_ops.softmax_cross_entropy_with_logits_v2(
            labels=labels, logits=logits)
        self.assertAllEqual(result_a, result_b)

def testInvalidAxes(self):
  sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
  with test_util.force_cpu():
    with self.assertRaisesOpError("Invalid reduction dimension -3"):
      self.evaluate(sparse_ops.sparse_reduce_sum(sp_t, -3))
    with self.assertRaisesOpError("Invalid reduction dimension 2"):
      self.evaluate(sparse_ops.sparse_reduce_sum(sp_t, 2))
    with self.assertRaisesOpError("Invalid reduction dimension -3"):
      self.evaluate(sparse_ops.sparse_reduce_max(sp_t, -3))
    with self.assertRaisesOpError("Invalid reduction dimension 2"):
      self.evaluate(sparse_ops.sparse_reduce_max(sp_t, 2))

def testMismatchedShapes(self):
  with test_util.force_cpu():
    sp_zero = sparse_tensor.SparseTensor([[0, 0]], [0], [1, 1])
    sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
    with self.assertRaisesOpError("Operands do not have the same ranks"):
      self.evaluate(sparse_ops.sparse_maximum(sp_zero, sp_one))

    sp_zero = sparse_tensor.SparseTensor([[0]], [0], [1])
    sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
    with self.assertRaisesOpError("Operands' shapes do not match"):
      self.evaluate(sparse_ops.sparse_maximum(sp_zero, sp_one))

def testCwiseShapeValidation(self):
  # Test case for GitHub 24072.
  with test_util.force_cpu():
    a = array_ops.ones([3, 4, 1], dtype=dtypes.int32)
    b = sparse_tensor.SparseTensor([[0, 0, 1, 0], [0, 0, 3, 0]], [10, 20],
                                   [1, 1, 4, 2])
    c = a * b
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "broadcasts dense to sparse only; got incompatible shapes"):
      self.evaluate(c)

def testRetainNone(self):
  with test_util.force_cpu():
    sp_input = self._SparseTensor_5x6()
    to_retain = np.zeros((6,), dtype=np.bool_)
    sp_output = sparse_ops.sparse_retain(sp_input, to_retain)

    output = self.evaluate(sp_output)

    self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
    self.assertAllEqual(output.values, [])
    self.assertAllEqual(output.dense_shape, [5, 6])

def testTightBoundingBox(self):
  with test_util.force_cpu():
    sp_input = self._SparseTensor_2x5x6()
    sp_output = sparse_ops.sparse_reset_shape(sp_input)

    output = self.evaluate(sp_output)

    self.assertAllEqual(output.indices,
                        [[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4],
                         [1, 3, 2], [1, 3, 3]])
    self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
    self.assertAllEqual(output.dense_shape, [2, 4, 5])

def testInt64(self):
  with test_util.force_cpu():
    sp_input = self._SparseTensor_5x6(dtypes.int64)
    output = sparse_ops.sparse_to_indicator(sp_input, 50)

    expected_output = np.zeros((5, 50), dtype=np.bool_)
    expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
    for expected_true in expected_trues:
      expected_output[expected_true] = True

    self.assertAllEqual(output, expected_output)

def testBasic(self):
  with test_util.force_cpu():
    for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
      to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=np.bool_)
      sp_output = sparse_ops.sparse_retain(sp_input, to_retain)

      output = self.evaluate(sp_output)

      self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
      self.assertAllEqual(output.values, [0, 14, 32])
      self.assertAllEqual(output.dense_shape, [5, 6])

def testHigherRank(self):
  with test_util.force_cpu():
    sp_input = self._SparseTensor_2x3x4(dtypes.int64)
    output = sparse_ops.sparse_to_indicator(sp_input, 200)

    expected_output = np.zeros((2, 3, 200), dtype=np.bool_)
    expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12), (1, 0, 103),
                      (1, 1, 149), (1, 1, 150), (1, 2, 122)]
    for expected_true in expected_trues:
      expected_output[expected_true] = True

    self.assertAllEqual(output, expected_output)

def testAddSelfAndNegation(self):
  with test_util.force_cpu():
    sp_a = self._SparseTensor_3x3()
    sp_b = self._SparseTensor_3x3(negate=True)

    sp_sum = sparse_ops.sparse_add(sp_a, sp_b, 0.1)
    sum_out = self.evaluate(sp_sum)

    self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
    self.assertAllEqual(sum_out.indices, np.empty([0, 2]))
    self.assertAllEqual(sum_out.values, [])
    self.assertAllEqual(sum_out.dense_shape, [3, 3])

def testInt32AndFloat32(self):
  vocab_size = 50
  indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
  with test_util.force_cpu():
    for indices in (indices_v,
                    sparse_tensor.SparseTensor.from_value(indices_v)):
      for values in (values_v,
                     sparse_tensor.SparseTensor.from_value(values_v)):
        sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)

        output = self.evaluate(sp_output)
        self._AssertResultsSorted(output, vocab_size)

def testInputUnavailableInGraphConstructionOk(self):
  with test_util.force_cpu():
    sp_input = self._SparseTensorValue_2x5x6()
    new_shape = np.array([3, 6, 7], dtype=np.int64)
    sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)

    output = self.evaluate(sp_output)

    self.assertAllEqual(output.indices,
                        [[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4],
                         [1, 3, 2], [1, 3, 3]])
    self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
    self.assertAllEqual(output.dense_shape, [3, 6, 7])

def testValuesInVariable(self):
  indices = constant_op.constant([[1]], dtype=dtypes.int64)
  values = variables.Variable([1], trainable=False, dtype=dtypes.float32)
  shape = constant_op.constant([1], dtype=dtypes.int64)
  sp_input = sparse_tensor.SparseTensor(indices, values, shape)
  sp_output = sparse_ops.sparse_add(sp_input, sp_input)

  with test_util.force_cpu():
    self.evaluate(variables.global_variables_initializer())
    output = self.evaluate(sp_output)
    self.assertAllEqual(output.values, [2])

def testAddSelf(self):
  with test_util.force_cpu():
    for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
      for sp_b in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
        sp_sum = sparse_ops.sparse_add(sp_a, sp_b)
        self.assertAllEqual((3, 3), sp_sum.get_shape())

        sum_out = self.evaluate(sp_sum)

        self.assertEqual(sp_sum.dense_shape.get_shape(), [2])
        self.assertAllEqual(sum_out.indices,
                            [[0, 1], [1, 0], [2, 0], [2, 1]])
        self.assertAllEqual(sum_out.values, [2, 4, 6, 8])
        self.assertAllEqual(sum_out.dense_shape, [3, 3])

def testSparseReduceSumOrMaxShape(self):
  sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)

  with test_util.force_cpu():
    for do_sum in [True, False]:
      for keep_dims in [True, False]:
        self._testSparseReduceShape(sp_t, None, 2, keep_dims, do_sum)
        self._testSparseReduceShape(sp_t, 0, 2, keep_dims, do_sum)
        self._testSparseReduceShape(sp_t, [1], 2, keep_dims, do_sum)
        self._testSparseReduceShape(sp_t, [0, 1], 2, keep_dims, do_sum)
        self._testSparseReduceShape(sp_t, [1, 0], 2, keep_dims, do_sum)
        self._testSparseReduceShape(sp_t, [-1], 2, keep_dims, do_sum)
        self._testSparseReduceShape(sp_t, [1, -2], 2, keep_dims, do_sum)

def testNoEmptyRows(self):
  with test_util.force_cpu():
    sp_input = self._SparseTensor_2x6()
    sp_output, empty_row_indicator = (
        sparse_ops.sparse_fill_empty_rows(sp_input, -1))

    output, empty_row_indicator_out = self.evaluate(
        [sp_output, empty_row_indicator])

    self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
    self.assertAllEqual(output.values, [0, 10, 13, 14])
    self.assertAllEqual(output.dense_shape, [2, 6])
    self.assertAllEqual(empty_row_indicator_out,
                        np.zeros(2).astype(np.bool_))

def testAssignDependencyAcrossDevices(self):
  with test_util.use_gpu():
    # The variable and an op to increment it are on the GPU.
    var = state_ops.variable_op([1], dtypes.float32)
    self.evaluate(state_ops.assign(var, [1.0]))
    increment = state_ops.assign_add(var, [1.0])
    with ops.control_dependencies([increment]):
      with test_util.force_cpu():
        # This mul op is pinned to the CPU, but reads the variable from the
        # GPU. The test ensures that the dependency on 'increment' is still
        # honored, i.e., the Send and Recv from GPU to CPU should take place
        # only after the increment.
        result = math_ops.multiply(var, var)
    self.assertAllClose([4.0], self.evaluate(result))

def testFillNumber(self):
  with test_util.force_cpu():
    for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))

      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])

      self.assertAllEqual(
          output.indices,
          [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(np.bool_))

def testInvalidSparseTensor(self):
  with test_util.force_cpu():
    shape = [2, 2]
    val = [0]
    dense = constant_op.constant(np.zeros(shape, dtype=np.int32))

    for bad_idx in [
        [[-1, 0]],  # -1 is invalid.
        [[1, 3]],  # ...so is 3.
    ]:
      sparse = sparse_tensor.SparseTensorValue(bad_idx, val, shape)
      s = sparse_ops.sparse_add(sparse, dense)

      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                  "invalid index"):
        self.evaluate(s)

def testFillString(self):
  with test_util.force_cpu():
    sp_input = self._SparseTensor_String5x6()
    sp_output, empty_row_indicator = (
        sparse_ops.sparse_fill_empty_rows(sp_input, ""))

    output, empty_row_indicator_out = self.evaluate(
        [sp_output, empty_row_indicator])

    self.assertAllEqual(
        output.indices,
        [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
    self.assertAllEqual(output.values,
                        [b"a", b"b", b"c", b"d", b"", b"e", b"f", b""])
    self.assertAllEqual(output.dense_shape, [5, 6])
    self.assertAllEqual(empty_row_indicator_out,
                        np.array([0, 0, 1, 0, 1]).astype(np.bool_))