Example #1
    def test_sparse_input_with_weights(self):
        input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 4]], dtype=np.int64)
        weights_array = np.array([[.1, .2, .3, .4], [.2, .1, .4, .3]])
        sparse_tensor_data = sparse_ops.from_dense(input_array)
        sparse_weight_data = sparse_ops.from_dense(weights_array)

        # pyformat: disable
        expected_output = [[0, .1, .2, .3, .4, 0], [0, .4, 0, .1, .5, 0]]
        # pyformat: enable
        max_tokens = 6
        expected_output_shape = [None, max_tokens]

        input_data = keras.Input(shape=(None, ),
                                 dtype=dtypes.int64,
                                 sparse=True)
        weight_data = keras.Input(shape=(None, ),
                                  dtype=dtypes.float32,
                                  sparse=True)

        layer = get_layer_class()(max_tokens=max_tokens,
                                  output_mode=category_encoding.COUNT)
        int_data = layer(input_data, count_weights=weight_data)
        self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

        model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
        output_dataset = model.predict(
            [sparse_tensor_data, sparse_weight_data], steps=1)
        self.assertAllClose(expected_output, output_dataset)

    def test_sparse_adapt(self):
        vocab_data = sparse_ops.from_dense(
            np.array([[1, 1, 0, 1, 1, 2, 2, 0, 2, 3, 3, 0, 4]],
                     dtype=np.int64))
        vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
        input_array = sparse_ops.from_dense(
            np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64))

        # pyformat: disable
        expected_output = [[0, 1, 1, 1, 0], [0, 1, 0, 1, 0]]
        # pyformat: enable
        max_tokens = 5
        expected_output_shape = [None, max_tokens]

        input_data = keras.Input(shape=(None, ),
                                 dtype=dtypes.int64,
                                 sparse=True)
        layer = get_layer_class()(max_tokens=None,
                                  output_mode=category_encoding.BINARY)
        layer.adapt(vocab_dataset)
        int_data = layer(input_data)
        self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

        model = keras.Model(inputs=input_data, outputs=int_data)
        output_dataset = model.predict(input_array, steps=1)
        self.assertAllEqual(expected_output, output_dataset)
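As a plain-NumPy cross-check (not part of the original test), the weighted COUNT output in test_sparse_input_with_weights above can be reproduced by summing each token's weights per row; a minimal sketch assuming only numpy:

import numpy as np

input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 4]], dtype=np.int64)
weights_array = np.array([[.1, .2, .3, .4], [.2, .1, .4, .3]])
max_tokens = 6

# Accumulate the weight of every occurrence of each token, row by row.
counts = np.zeros((input_array.shape[0], max_tokens))
for row in range(input_array.shape[0]):
    for token, weight in zip(input_array[row], weights_array[row]):
        counts[row, token] += weight
print(counts)  # [[0, .1, .2, .3, .4, 0], [0, .4, 0, .1, .5, 0]]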
 def test_sparse_input_too_many_indices_fails(self):
   x = sparse_ops.from_dense(
       np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
   weights = sparse_ops.from_dense(
       np.array([[3, 1, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
   with self.assertRaisesIncompatibleShapesError():
     self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
Example #4
 def test_sparse_input_wrong_indices_fails(self):
   x = sparse_ops.from_dense(
       np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
   weights = sparse_ops.from_dense(
       np.array([[3, 1, 0, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
   with self.assertRaisesRegex(errors.InvalidArgumentError,
                               "must have the same indices"):
     self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
Example #5
 def test_sparse_input_too_many_indices_fails(self):
     x = sparse_ops.from_dense(
         np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
                  dtype=np.int32))
     weights = sparse_ops.from_dense(
         np.array([[3, 1, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
                  dtype=np.int32))
      with self.assertRaisesRegex(errors.InvalidArgumentError,
                                  "Incompatible shapes"):
          self.evaluate(
              bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
Example #6
    def testMapValues(self):
        # supplying no sparse tensor should result in ValueError
        with self.assertRaises(ValueError):
            sparse_ops.map_values(math_ops.abs, 0.0)

        sp = sparse_ops.from_dense([[0.0, 1.0, 0.0], [-2.0, 1.0, 0.0]])

        # helper function to check equality of sparse tensors
        def assert_sparse_equal(expected, result):
            self.assertAllEqual(expected.values,
                                result.values,
                                msg='Values differ')
            self.assertAllEqual(expected.indices,
                                result.indices,
                                msg='Indices differ')
            self.assertAllEqual(expected.dense_shape,
                                result.dense_shape,
                                msg='Shapes differ')

        # check for a single sparse argument
        expected = sparse_ops.from_dense([[0.0, 1.0, 0.0], [2.0, 1.0, 0.0]])
        result = sparse_ops.map_values(math_ops.abs, sp)
        assert_sparse_equal(expected, result)

        # check correct passing of keyword argument, and handling of two sparse
        # arguments at the same time
        def mapping(arg1, arg2, kwarg):
            self.assertEqual(kwarg, 'kwarg')
            return arg1 + arg2

        result = sparse_ops.map_values(mapping, sp, sp, kwarg='kwarg')
        expected = sparse_ops.from_dense([[0.0, 2.0, 0.0], [-4.0, 2.0, 0.0]])
        assert_sparse_equal(expected, result)

        # check that index mismatches are correctly detected even if the
        # `values` have compatible shapes
        sp_incomp = sparse_ops.from_dense([[0.0, 1.0, 0.0], [-2.0, 0.0, 1.0]])
        with self.assertRaises((errors.InvalidArgumentError, ValueError)):
            result = sparse_ops.map_values(mapping,
                                           sp,
                                           sp_incomp,
                                           kwarg='kwarg')
            self.evaluate(result)

        # check that shape mismatches are correctly detected
        sp_incomp = sparse_tensor.SparseTensor(sp.indices, sp.values, (25, 25))
        with self.assertRaises((errors.InvalidArgumentError, ValueError)):
            result = sparse_ops.map_values(mapping,
                                           sp,
                                           sp_incomp,
                                           kwarg='kwarg')
            self.evaluate(result)
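Conceptually, map_values applies the callable only to the stored `values` of its SparseTensor operands and reuses their `indices` and `dense_shape`, which is why the mismatched-indices and mismatched-shape cases above must fail. A minimal sketch of the single-operand case using the public TF API (an illustration, not the library implementation):

import tensorflow as tf

sp = tf.sparse.from_dense([[0.0, 1.0, 0.0], [-2.0, 1.0, 0.0]])
# Transform only the stored values; the sparsity pattern is left untouched.
result = tf.SparseTensor(sp.indices, tf.abs(sp.values), sp.dense_shape)
print(tf.sparse.to_dense(result).numpy())  # [[0. 1. 0.] [2. 1. 0.]]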
class ConvertToListTest(keras_parameterized.TestCase):

  # Note: We need the inputs to be lambdas below to avoid some strangeness with
  # TF1.x graph mode - specifically, if the inputs are created outside the test
  # function body, the graph inside the test body will not contain the tensors
  # that were created in the parameters.
  @parameterized.named_parameters(
      {
          "testcase_name": "ndarray",
          "inputs": lambda: np.array([[1, 2, 3], [4, 5, 6]]),
          "expected": [[1, 2, 3], [4, 5, 6]]
      }, {
          "testcase_name": "list",
          "inputs": lambda: [[1, 2, 3], [4, 5, 6]],
          "expected": [[1, 2, 3], [4, 5, 6]]
      }, {
          "testcase_name": "tensor",
          "inputs": lambda: constant_op.constant([[1, 2, 3], [4, 5, 6]]),
          "expected": [[1, 2, 3], [4, 5, 6]]
      }, {
          "testcase_name":
              "ragged_tensor",
          "inputs":
              lambda: ragged_factory_ops.constant([[1, 2, 3, 4], [4, 5, 6]]),
          "expected": [[1, 2, 3, 4], [4, 5, 6]]
      }, {
          "testcase_name": "sparse_tensor",
          "inputs": lambda: sparse_ops.from_dense([[1, 2, 0, 4], [4, 5, 6, 0]]),
          "expected": [[1, 2, -1, 4], [4, 5, 6, -1]]
      })
  def test_conversion(self, inputs, expected):
    values = base_preprocessing_layer.convert_to_list(inputs())
    self.assertAllEqual(expected, values)
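The -1 entries expected for the sparse_tensor case above suggest that missing positions are filled with a default of -1 when the sparse input is densified; a small sketch of that behavior with the public API (illustrative only, convert_to_list itself is not shown in this excerpt):

import tensorflow as tf

st = tf.sparse.from_dense([[1, 2, 0, 4], [4, 5, 6, 0]])  # zeros are dropped
dense = tf.sparse.to_dense(st, default_value=-1)
print(dense.numpy().tolist())  # [[1, 2, -1, 4], [4, 5, 6, -1]]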
Example #8
 def test_sparse_bincount_col_reduce_binary(self, dtype):
     num_rows = 128
     num_cols = 27
     size = 100
     np.random.seed(42)
     inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
     np_out = np.reshape(
         np.concatenate([
             np.where(np.bincount(inp[j, :], minlength=size) > 0, 1, 0)
             for j in range(num_rows)
         ],
                        axis=0), (num_rows, size))
     # from_dense will filter out 0s.
     inp = inp + 1
      # from_dense will cause OOM on GPU.
     with ops.device("/CPU:0"):
         inp_sparse = sparse_ops.from_dense(inp)
     self.assertAllEqual(
         np_out,
         self.evaluate(
             gen_math_ops.sparse_bincount(
                 indices=inp_sparse.indices,
                 values=inp_sparse.values - 1,
                 dense_shape=inp_sparse.dense_shape,
                 size=size,
                 weights=[],
                 binary_output=True)))
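The `inp + 1` / `values - 1` shift above exists because from_dense drops zero entries, which would otherwise silently remove valid bin indices; a minimal illustration of the effect:

import tensorflow as tf

row = tf.constant([[3, 0, 1, 0]])
lossy = tf.sparse.from_dense(row)        # the two zeros are dropped
shifted = tf.sparse.from_dense(row + 1)  # shift so no stored value is zero
restored = shifted.values - 1            # recover the original values
print(lossy.values.numpy())    # [3 1]
print(restored.numpy())        # [3 0 1 0]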
Example #9
 def test_revive_subclassed_with_sparse_model(self):
   model = SubclassedSparseModelNoConfig(1., 2.)
   # Run data through the Model to create save spec and weights.
   x = sparse_ops.from_dense(np.ones((10, 2, 3), dtype=np.float32))
   model.predict(x, batch_size=10)
   model.save(self.path, save_format='tf')
   revived = keras_load.load(self.path)
   self._assert_revived_correctness(model, revived)
Example #10
 def testDenseFromConstantToSparse(self):
   expected_constant = np.reshape(np.arange(24, dtype=np.int64), (3, 4, 2))
   tensor = constant_op.constant(expected_constant)
   sparse = sparse_ops.from_dense(tensor)
   dense = sparse_ops.sparse_to_dense(sparse.indices, sparse.dense_shape,
                                      sparse.values)
   constant = self.evaluate(dense)
   self.assertAllEqual(expected_constant, constant)
Example #11
 def test_ragged_input_sparse_weights_fails(self):
     x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
     weights = sparse_ops.from_dense(
         np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
                  dtype=np.int32))
     with self.assertRaisesRegex(ValueError, "must be a RaggedTensor"):
         self.evaluate(
             bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
Example #12
 def test_sparse_input_dense_weights_fails(self):
     x = sparse_ops.from_dense(
         np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
                  dtype=np.int32))
     weights = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
     with self.assertRaisesRegex(ValueError, "must be a SparseTensor"):
         self.evaluate(
             bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
Example #13
 def testConstantStringToSparse(self):
     # Test case for GitHub issue 40633.
     tensor = constant_op.constant(list('ababa'))
     sparse = sparse_ops.from_dense(tensor)
     result = self.evaluate(sparse)
     self.assertAllEqual([[0], [1], [2], [3], [4]], result.indices)
     self.assertAllEqual([b'a', b'b', b'a', b'b', b'a'], result.values)
     self.assertAllEqual([5], result.dense_shape)

 def testComplex(self):
     for dtype in [dtypes.complex64, dtypes.complex128]:
         tf_val = math_ops.cast(
             constant_op.constant([1.0 + 1.0j, 2.0 - 2.0j]), dtype)
         tf_ans = sparse_ops.sparse_tensor_to_dense(
             sparse_ops.from_dense(tf_val))
         self.assertAllClose(tf_val, tf_ans)
Example #15
 def test_sparse_input(self,
                       x,
                       expected_indices,
                       expected_values,
                       expected_shape,
                       maxlength=None,
                       minlength=None,
                       binary_output=False,
                       weights=None,
                       axis=-1):
   x_sparse = sparse_ops.from_dense(x)
   w_sparse = sparse_ops.from_dense(weights) if weights is not None else None
   y = bincount_ops.sparse_bincount(
       x_sparse,
       weights=w_sparse,
       minlength=minlength,
       maxlength=maxlength,
       binary_output=binary_output,
       axis=axis)
   self.assertAllEqual(expected_indices, y.indices)
   self.assertAllEqual(expected_values, y.values)
   self.assertAllEqual(expected_shape, y.dense_shape)
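The named parameter sets that drive this helper are not included in the excerpt; as a hedged, standalone illustration of what it checks, the public tf.sparse.bincount API yields the following for a small zero-free input (these values are illustrative, not taken from the original parameter list):

import numpy as np
import tensorflow as tf

x = tf.sparse.from_dense(np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32))
y = tf.sparse.bincount(x, axis=-1)  # per-row counts, returned as a SparseTensor
print(y.indices.numpy().tolist())      # [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]]
print(y.values.numpy().tolist())       # [1, 1, 1, 2, 1]
print(y.dense_shape.numpy().tolist())  # [2, 6]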
  def test_sparse_op_layer_keras_tensors(self):
    int_values = keras.Input(shape=(None,), dtype=dtypes.int32, sparse=True)
    float_values = math_ops.cast(int_values, dtypes.float32)
    _ = keras.Model(int_values, float_values)
    model = keras.Model(int_values, float_values)
    model.compile(loss='mse')

    input_data = sparse_ops.from_dense(
        np.array([[1, 2], [3, 4]], dtype=np.int32))
    expected = [[1.0, 2.0], [3.0, 4.0]]
    output = model.predict(input_data)
    self.assertIsInstance(output, sparse_tensor.SparseTensor)
    self.assertAllClose(expected, sparse_ops.sparse_tensor_to_dense(output))
    def _assert_revived_correctness(self, model, revived):
        self.assertAllEqual(model.input_names, revived.input_names)
        self.assertAllEqual(model.output_names, revived.output_names)
        if model.inputs is not None:
            self.assertTrue(
                all([
                    i.shape.as_list() == r.shape.as_list()
                    and i.dtype == r.dtype
                    for (i, r) in zip(model.inputs, revived.inputs)
                ]))
            self.assertTrue(
                all([
                    i.shape.as_list() == r.shape.as_list()
                    and i.dtype == r.dtype
                    for (i, r) in zip(model.outputs, revived.outputs)
                ]))

        self.assertAllClose(self.evaluate(model.weights),
                            self.evaluate(revived.weights))
        input_arr = constant_op.constant(
            np.random.random((2, 2, 3)).astype(np.float32))
        if isinstance(revived._saved_model_inputs_spec,
                      sparse_tensor.SparseTensorSpec):
            input_arr = sparse_ops.from_dense(input_arr)

        self.assertAllClose(model(input_arr), revived(input_arr))
        self.assertAllClose(sum(model.losses), sum(revived.losses))
        self.assertAllClose(len(model.losses), len(revived.losses))
        self.assertEqual(len(model.metrics), len(revived.metrics))
        # TODO(b/150403085): Investigate why the metric order changes when running
        # this test in tf-nightly.
        self.assertAllClose(sorted([m.result() for m in model.metrics]),
                            sorted([m.result() for m in revived.metrics]))
        model_layers = {layer.name: layer for layer in model.layers}
        revived_layers = {layer.name: layer for layer in revived.layers}
        self.assertAllEqual(model_layers.keys(), revived_layers.keys())

        for name in model_layers:
            model_layer = model_layers[name]
            revived_layer = revived_layers[name]
            self.assertEqual(model_layer.name, revived_layer.name)
            self.assertEqual(model_layer.dtype, revived_layer.dtype)
            self.assertEqual(model_layer.trainable, revived_layer.trainable)
            if 'WithConfig' in type(model_layer).__name__:
                self.assertEqual(type(model_layer), type(revived_layer))
            else:
                # When loading layers from SavedModel, a new class is dynamically
                # created with the same name.
                self.assertEqual(
                    type(model_layer).__name__,
                    type(revived_layer).__name__)
Example #18
 def test_sparse_input_col_reduce_count(self, dtype):
   num_rows = 128
   num_cols = 27
   size = 100
   np.random.seed(42)
   inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
   np_out = np.reshape(
       np.concatenate(
           [np.bincount(inp[j, :], minlength=size) for j in range(num_rows)],
           axis=0), (num_rows, size))
   # from_dense will filter out 0s.
   inp = inp + 1
    # from_dense will cause OOM on GPU.
   with ops.device("/CPU:0"):
     inp_sparse = sparse_ops.from_dense(inp)
     inp_sparse = sparse_tensor.SparseTensor(inp_sparse.indices,
                                             inp_sparse.values - 1,
                                             inp_sparse.dense_shape)
   self.assertAllEqual(
       np_out, self.evaluate(bincount_ops.bincount(arr=inp_sparse, axis=-1)))
Example #19
  def test_sparse_input(self):
    input_array = np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64)
    sparse_tensor_data = sparse_ops.from_dense(input_array)

    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0, 0],
                       [0, 1, 0, 1, 0, 0]]
    # pyformat: enable
    max_tokens = 6
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)

    layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.BINARY)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(sparse_tensor_data, steps=1)
    self.assertAllEqual(expected_output, output_dataset)
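In BINARY mode the layer emits a multi-hot indicator rather than counts; the expected_output above (with index 0 left empty, since from_dense strips the zero padding) can be reproduced with plain NumPy:

import numpy as np

input_array = np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64)
max_tokens = 6

multi_hot = np.zeros((input_array.shape[0], max_tokens), dtype=np.int64)
for row, tokens in enumerate(input_array):
    # Zeros are padding and never reach the layer, so they are skipped here.
    multi_hot[row, tokens[tokens != 0]] = 1
print(multi_hot.tolist())  # [[0, 1, 1, 1, 0, 0], [0, 1, 0, 1, 0, 0]]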
Example #20
class RaggedPrintV2Test(test_util.TensorFlowTestCase, parameterized.TestCase):

    # pylint: disable=g-long-lambda
    @parameterized.named_parameters([
        dict(testcase_name='2d_int_values',
             inputs=lambda: [ragged_factory_ops.constant([[1, 2], [3]])],
             expected='[[1, 2], [3]]\n'),
        dict(testcase_name='3d_int_values',
             inputs=lambda:
             [ragged_factory_ops.constant([[[1, 2], [3]], [[4]]])],
             expected='[[[1, 2], [3]], [[4]]]\n'),
        dict(testcase_name='2d_str_values',
             inputs=lambda: [ragged_factory_ops.constant([['a', 'b'], ['c']])],
             expected="[['a', 'b'], ['c']]\n"),
        dict(testcase_name='2d_str_values_with_escaping',
             inputs=lambda: [ragged_factory_ops.constant([["a'b"], ['c"d']])],
             expected="[['a\\'b'], ['c\"d']]\n"),
        dict(testcase_name='two_ragged_values',
             inputs=lambda: [
                 ragged_factory_ops.constant([[1, 2], [3]]),
                 ragged_factory_ops.constant([[5], [], [6, 7, 8]])
             ],
             expected='[[1, 2], [3]] [[5], [], [6, 7, 8]]\n'),
        dict(testcase_name='ragged_value_and_non_tensor_values',
             inputs=lambda:
             ['a', 5, True,
              ragged_factory_ops.constant([[1, 2], [3]]), 'c'],
             expected='a 5 True [[1, 2], [3]] c\n'),
        dict(testcase_name='ragged_value_and_dense_value',
             inputs=lambda: [
                 ragged_factory_ops.constant([[1, 2], [3]]),
                 constant_op.constant([[1, 2], [3, 4]])
             ],
             expected='[[1, 2], [3]] [[1 2]\n [3 4]]\n'),
        dict(
            testcase_name='ragged_value_and_sparse_value',
            inputs=lambda: [
                ragged_factory_ops.constant([[1, 2], [3]]),
                sparse_ops.from_dense([[1]])
            ],
            expected=(
                '[[1, 2], [3]] '
                "'SparseTensor(indices=[[0 0]], values=[1], shape=[1 1])'\n")),
        dict(testcase_name='summarize_default',
             inputs=lambda: [
                 ragged_factory_ops.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9],
                                              [10], [], [], [], [], [11, 12]])
             ],
             expected=('[[1, 2, 3, ..., 7, 8, 9], [10], [], '
                       '..., '
                       '[], [], [11, 12]]\n')),
        dict(testcase_name='summarize_2',
             inputs=lambda: [
                 ragged_factory_ops.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9],
                                              [10], [], [], [], [], [11, 12]])
             ],
             summarize=2,
             expected='[[1, 2, ..., 8, 9], [10], ..., [], [11, 12]]\n'),
        dict(testcase_name='summarize_neg1',
             inputs=lambda: [
                 ragged_factory_ops.constant([[1, 2, 3, 4, 5, 6, 7, 8, 9],
                                              [10], [], [], [], [], [11, 12]])
             ],
             summarize=-1,
             expected=('[[1, 2, 3, 4, 5, 6, 7, 8, 9], [10], '
                       '[], [], [], [], [11, 12]]\n')),
    ])
    def testRaggedPrint(self, inputs, expected, summarize=None):
        if callable(inputs):
            inputs = inputs()
        with tempfile.TemporaryDirectory() as tmpdirname:
            path = os.path.join(tmpdirname, 'print_output')
            kwargs = {'output_stream': 'file://{}'.format(path)}
            if summarize is not None:
                kwargs.update(summarize=summarize)
            self.evaluate(logging_ops.print_v2(*inputs, **kwargs))
            with open(path, 'r') as f:
                actual = f.read()
            self.assertEqual(repr(actual), repr(expected))
class ExtensionTypeTest(test_util.TensorFlowTestCase, parameterized.TestCase):

  @parameterized.named_parameters([
      ('Ragged', lambda: ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])),
      ('Sparse', lambda: sparse_ops.from_dense([[0, 0, 3, 0], [1, 2, 0, 0]])),
  ])
  def testEncodeAndDecode(self, value_factory):
    value = value_factory()

    encoded = composite_tensor_ops.composite_tensor_to_variants(value)
    self.assertEqual(encoded.dtype, dtypes.variant)
    self.assertEqual(encoded.shape.rank, 0)

    decoded = composite_tensor_ops.composite_tensor_from_variant(
        encoded, value._type_spec)
    self.assertTrue(value._type_spec.is_compatible_with(decoded._type_spec))
    value_components = nest.flatten(value, expand_composites=True)
    decoded_components = nest.flatten(decoded, expand_composites=True)
    self.assertLen(value_components, len(decoded_components))
    for v, d in zip(value_components, decoded_components):
      self.assertAllEqual(v, d)

  @parameterized.named_parameters([
      ('WrongType', lambda: ragged_factory_ops.constant([[1]]),
       sparse_tensor.SparseTensorSpec([None, None], dtypes.int32),
       r'Expected a SPARSE_TENSOR_SPEC \(based on `type_spec`\), but `encoded` '
       'contains a RAGGED_TENSOR_SPEC'),
      ('WrongNumComponents', lambda: ragged_factory_ops.constant([[1]]),
       ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32),
       'Encoded value has 2 tensor components; expected 3 components'),
      ('WrongDType', lambda: ragged_factory_ops.constant([[1]]),
       ragged_tensor.RaggedTensorSpec([None, None], dtypes.float32),
       'Tensor component 0 had dtype DT_INT32; expected dtype DT_FLOAT'),
  ])
  def testDecodingErrors(self, value, spec, message):
    encoded = composite_tensor_ops.composite_tensor_to_variants(value())
    with self.assertRaisesRegex(errors.InvalidArgumentError, message):
      self.evaluate(
          composite_tensor_ops.composite_tensor_from_variant(encoded, spec))

  @parameterized.named_parameters([
      ('IncompatibleSpec', lambda: ragged_factory_ops.constant([[1]]),
       ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32),
       r'`type_spec` .* is not compatible with `value` .*'),
  ])
  def testEncodingErrors(self, value, spec, message):
    with self.assertRaisesRegex(ValueError, message):
      composite_tensor_ops.composite_tensor_to_variants(value(), spec)

  def testRoundTripThroughTensorProto(self):
    value = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
    encoded = composite_tensor_ops.composite_tensor_to_variants(value)
    proto = parsing_ops.SerializeTensor(tensor=encoded)
    parsed = parsing_ops.ParseTensor(serialized=proto, out_type=dtypes.variant)
    decoded = composite_tensor_ops.composite_tensor_from_variant(
        parsed, value._type_spec)
    self.assertAllEqual(value, decoded)

  def testGradient(self):

    def func(x):
      x2 = composite_tensor_ops.composite_tensor_to_variants(x * 2)
      x3 = composite_tensor_ops.composite_tensor_from_variant(x2, x._type_spec)
      return x3.with_values(x3.values * math_ops.range(6.0))

    x = ragged_factory_ops.constant([[1.0, 2.0, 3.0], [4.0], [5.0, 6.0]])
    if context.executing_eagerly():
      with backprop.GradientTape() as t:
        t.watch(x.values)
        y = func(x)
        g = t.gradient(y.values, x.values)
    else:
      y = func(x)
      g = gradients_impl.gradients(ys=y.values, xs=x.values)[0]
    self.assertAllClose(g, [0.0, 2.0, 4.0, 6.0, 8.0, 10.0])
Example #22
 def test_is_extension_type_return_true_for_sparse_tensor(self):
     self.assertTrue(
         tf_utils.is_extension_type(
             sparse_ops.from_dense([[1, 2], [3, 4]])))