Example #1
  def test_works_with_registered(self):

    class CustomClass(object):

      def value(self):
        return ops.convert_to_tensor(42.)

    ops.register_tensor_conversion_function(
        CustomClass, lambda value, **_: value.value())

    tf_utils.register_symbolic_tensor_type(CustomClass)

    if context.executing_eagerly():
      self.assertFalse(tf_utils.is_symbolic_tensor(
          variables.Variable(name='blah', initial_value=0.)))
      self.assertFalse(tf_utils.is_symbolic_tensor(
          ops.convert_to_tensor(0.)))
      self.assertFalse(tf_utils.is_symbolic_tensor(
          sparse_tensor.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
      self.assertFalse(tf_utils.is_symbolic_tensor(CustomClass()))
    else:
      self.assertTrue(tf_utils.is_symbolic_tensor(
          variables.Variable(name='blah', initial_value=0.)))
      self.assertTrue(tf_utils.is_symbolic_tensor(
          ops.convert_to_tensor(0.)))
      self.assertTrue(tf_utils.is_symbolic_tensor(
          sparse_tensor.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
      self.assertTrue(tf_utils.is_symbolic_tensor(CustomClass()))
Example #4
  def test_enables_nontensor_plumbing(self):
    # Setup.

    class Foo(object):

      def __init__(self, input_):
        self._input = input_
        self.value = ops.convert_to_tensor(42.)

    ops.register_tensor_conversion_function(
        Foo, lambda x, *args, **kwargs: x.value)
    tf_utils.register_symbolic_tensor_type(Foo)

    class PlumbingLayer(keras.layers.Lambda):
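      """Lambda layer that returns `fn`'s raw (non-tensor) output.

      The wrapped `_fn` also converts that output to a tensor and copies the
      tensor's `shape`/`get_shape` onto it, so Keras can plumb the custom
      object through the graph as if it were a symbolic tensor.
      """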

      def __init__(self, fn, **kwargs):
        def _fn(*fargs, **fkwargs):
          d = fn(*fargs, **fkwargs)
          x = ops.convert_to_tensor(d)
          d.shape = x.shape
          d.get_shape = x.get_shape
          return d, x
        super(PlumbingLayer, self).__init__(_fn, **kwargs)
        self._enter_dunder_call = False

      def __call__(self, inputs, *args, **kwargs):
        self._enter_dunder_call = True
        d, _ = super(PlumbingLayer, self).__call__(inputs, *args, **kwargs)
        self._enter_dunder_call = False
        return d

      def call(self, inputs, *args, **kwargs):
        d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs)
        if self._enter_dunder_call:
          return d, v
        return d

    # User-land.
    model = keras.Sequential([
        keras.layers.InputLayer([]),
        PlumbingLayer(Foo),  # Makes a `Foo` object.
    ])
    # Let's ensure Keras graph history is preserved by composing the models.
    model = keras.Model(model.inputs, model(model.outputs))
    # Now we instantiate the model and verify we have a `Foo` object, not a
    # `Tensor`.
    y = model(ops.convert_to_tensor(7.))
    self.assertIsInstance(y, Foo)
Example #5
from tensorflow_probability.python.layers.internal import distribution_tensor_coercible as dtc
from tensorflow.python.keras.utils import tf_utils as keras_tf_utils

__all__ = [
    'CategoricalMixtureOfOneHotCategorical',
    'DistributionLambda',
    'IndependentBernoulli',
    'IndependentNormal',
    'KLDivergenceAddLoss',
    'KLDivergenceRegularizer',
    'MixtureSameFamily',
    'MultivariateNormalTriL',
    'OneHotCategorical',
]

keras_tf_utils.register_symbolic_tensor_type(dtc._TensorCoercible)  # pylint: disable=protected-access


def _event_size(event_shape, name=None):
    """Computes the number of elements in a tensor with shape `event_shape`.

  Args:
    event_shape: A tensor shape.
    name: The name to use for the tensor op to compute the number of elements
      (if such an op needs to be created).

  Returns:
    event_size: The number of elements in `event_shape`.  Returns a numpy int
    when the number of elements can be computed immediately.  Otherwise, returns
    a scalar tensor.
  """
Example #6
    def test_enables_nontensor_plumbing(self):
        # Setup.

        class Foo(object):
            def __init__(self, input_):
                self._input = input_
                self.value = ops.convert_to_tensor([[42.]])

            @property
            def dtype(self):
                return self.value.dtype

        ops.register_tensor_conversion_function(
            Foo, lambda x, *args, **kwargs: x.value)
        tf_utils.register_symbolic_tensor_type(Foo)

        class PlumbingLayer(keras.layers.Lambda):
            def __init__(self, fn, **kwargs):
                def _fn(*fargs, **fkwargs):
                    d = fn(*fargs, **fkwargs)
                    x = ops.convert_to_tensor(d)
                    d.shape = x.shape
                    d.get_shape = x.get_shape
                    return d, x

                super(PlumbingLayer, self).__init__(_fn, **kwargs)
                self._enter_dunder_call = False

            def __call__(self, inputs, *args, **kwargs):
                self._enter_dunder_call = True
                d, _ = super(PlumbingLayer,
                             self).__call__(inputs, *args, **kwargs)
                self._enter_dunder_call = False
                return d

            def call(self, inputs, *args, **kwargs):
                d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs)
                if self._enter_dunder_call:
                    return d, v
                return d

        # User-land.
        model = keras.Sequential([
            keras.layers.InputLayer((1, )),
            PlumbingLayer(Foo),  # Makes a `Foo` object.
        ])
        # Let's ensure Keras graph history is preserved by composing the models.
        model = keras.Model(model.inputs, model(model.outputs))
        # Now we instantiate the model and verify we have a `Foo` object, not a
        # `Tensor`.
        y = model(ops.convert_to_tensor([[7.]]))
        self.assertIsInstance(y, Foo)
        # Confirm that (custom) loss sees `Foo` instance, not Tensor.
        obtained_prediction_box = [None]

        def custom_loss(y_obs, y_pred):
            del y_obs
            obtained_prediction_box[0] = y_pred
            return y_pred

        # Apparently `compile` calls the loss function enough to trigger the
        # side-effect.
        model.compile('SGD', loss=custom_loss)
        self.assertIsInstance(obtained_prediction_box[0], Foo)
Example #7
def _tensor_conversion_function(tensor, dtype=None, name=None, as_ref=False):
    assert not as_ref, "Not implemented, as_ref={}".format(as_ref)
    assert dtype in [tf.int32, None], dtype
    return export_tensor(tensor, dtype=dtype)


# TODO(Morten)
# this allows implicit convertion of tf_big.Tensor to tf.Tensor,
# but since the output dtype is determined by the outer context
# we essentially have to export with the implied risk of data loss
tf_ops.register_tensor_conversion_function(Tensor, _tensor_conversion_function)


# this allows tf_big.Tensor to be plumbed through Keras layers
# but seems only truly useful when used in conjunction with
# `register_tensor_conversion_function`
tf_utils.register_symbolic_tensor_type(Tensor)
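
# For illustration only (a hypothetical usage sketch, not part of this module):
# once the conversion function above is registered, a tf_big.Tensor can be
# handed to ordinary TensorFlow ops and is converted implicitly, e.g.
#
#   x = constant([[1, 2], [3, 4]])   # a tf_big.Tensor (see `constant` below)
#   y = tf.convert_to_tensor(x)      # routed through _tensor_conversion_function
#   # `y` is a plain tf.Tensor whose dtype is chosen by the outer context,
#   # hence the data-loss caveat in the TODO above.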


def constant(tensor):
    assert isinstance(tensor, (np.ndarray, list, tuple)), type(tensor)
    return import_tensor(tensor)


def _convert_to_numpy_tensor(tensor):
    if isinstance(tensor, np.ndarray):
        return tensor

    if isinstance(tensor, (int, str)):
        return np.array([[tensor]])

    if isinstance(tensor, (list, tuple)):
Example #8
  def test_enables_nontensor_plumbing(self):
    # Setup.

    class Foo(object):

      def __init__(self, input_):
        self._input = input_
        self.value = ops.convert_to_tensor(42.)

      @property
      def dtype(self):
        return self.value.dtype

    ops.register_tensor_conversion_function(
        Foo, lambda x, *args, **kwargs: x.value)
    tf_utils.register_symbolic_tensor_type(Foo)

    class PlumbingLayer(keras.layers.Lambda):

      def __init__(self, fn, **kwargs):
        def _fn(*fargs, **fkwargs):
          d = fn(*fargs, **fkwargs)
          x = ops.convert_to_tensor(d)
          d.shape = x.shape
          d.get_shape = x.get_shape
          return d, x
        super(PlumbingLayer, self).__init__(_fn, **kwargs)
        self._enter_dunder_call = False

      def __call__(self, inputs, *args, **kwargs):
        self._enter_dunder_call = True
        d, _ = super(PlumbingLayer, self).__call__(inputs, *args, **kwargs)
        self._enter_dunder_call = False
        return d

      def call(self, inputs, *args, **kwargs):
        d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs)
        if self._enter_dunder_call:
          return d, v
        return d

    # User-land.
    model = keras.Sequential([
        keras.layers.InputLayer([]),
        PlumbingLayer(Foo),  # Makes a `Foo` object.
    ])
    # Let's ensure Keras graph history is preserved by composing the models.
    model = keras.Model(model.inputs, model(model.outputs))
    # Now we instantiate the model and verify we have a `Foo` object, not a
    # `Tensor`.
    y = model(ops.convert_to_tensor(7.))
    self.assertIsInstance(y, Foo)
    # Confirm that (custom) loss sees `Foo` instance, not Tensor.
    obtained_prediction_box = [None]
    def custom_loss(y_obs, y_pred):
      del y_obs
      obtained_prediction_box[0] = y_pred
      return y_pred
    # Apparently `compile` calls the loss function enough to trigger the
    # side-effect.
    model.compile('SGD', loss=custom_loss)
    self.assertIsInstance(obtained_prediction_box[0], Foo)