Example #1
class TensorMetaClass(type):

    def __new__(mcs, name, bases, attrs):
        operators = set(tf.Tensor.OVERLOADABLE_OPERATORS)
        operators.difference_update({'__eq__', '__ne__'})
        operators.update({'__iter__'})
        attrs.update((attr, _wrap_method(attr)) for attr in operators)

        # Support methods for __iter__ and __bool__
        private_methods = {
            name
            for name in dir(tf.Tensor) if name.startswith('_disallow')
        }
        attrs.update((attr, _wrap_method(attr)) for attr in private_methods)

        if JAX_MODE or NUMPY_MODE:
            other_attrs = {'__array_priority__'}
            if six.PY2:
                other_attrs.add('__nonzero__')
            else:
                other_attrs.add('__bool__')
            attrs.update(
                (attr, getattr(np.ndarray, attr)) for attr in other_attrs)
        else:
            attrs.update(
                (attr, getattr(tf.Tensor, attr))
                for attr in {'__bool__', '__array_priority__', '__nonzero__'})
        cls = super(TensorMetaClass, mcs).__new__(mcs, name, bases, attrs)
        tf.register_tensor_conversion_function(cls, conversion_func=_tensorize)
        return cls
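
For orientation, the registration hook itself is small. Below is a minimal standalone sketch (the `Boxed` class and `_boxed_to_tensor` function are hypothetical, not part of the snippet above) showing how a registered conversion function lets `tf.convert_to_tensor` and ordinary TF ops accept a custom type:

import tensorflow as tf


class Boxed:
  """Hypothetical wrapper holding a single tensor-like value."""

  def __init__(self, value):
    self.value = tf.convert_to_tensor(value)


def _boxed_to_tensor(x, dtype=None, name=None, as_ref=False):
  del name, as_ref  # unused by this toy conversion
  if dtype is not None and not dtype.is_compatible_with(x.value.dtype):
    raise ValueError('Cannot convert Boxed of dtype %s to %s' %
                     (x.value.dtype.name, dtype.name))
  return x.value


tf.register_tensor_conversion_function(Boxed, _boxed_to_tensor)

# Once registered, TF ops convert Boxed instances implicitly.
print(tf.add(Boxed(2.), 3.))  # => tf.Tensor(5.0, shape=(), dtype=float32)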
Example #2
  def test_works_with_registered(self):

    class CustomClass:

      def value(self):
        return tf.convert_to_tensor(42.)

    tf.register_tensor_conversion_function(
        CustomClass, lambda value, **_: value.value())

    tf_utils.register_symbolic_tensor_type(CustomClass)

    if tf.executing_eagerly():
      self.assertFalse(tf_utils.is_symbolic_tensor(
          tf.Variable(name='blah', initial_value=0.)))
      self.assertFalse(
          tf_utils.is_symbolic_tensor(
              tf.convert_to_tensor(0.)))
      self.assertFalse(tf_utils.is_symbolic_tensor(
          tf.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
      self.assertFalse(tf_utils.is_symbolic_tensor(CustomClass()))
    else:
      self.assertTrue(tf_utils.is_symbolic_tensor(
          tf.Variable(name='blah', initial_value=0.)))
      self.assertTrue(
          tf_utils.is_symbolic_tensor(
              tf.convert_to_tensor(0.)))
      self.assertTrue(tf_utils.is_symbolic_tensor(
          tf.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
      self.assertTrue(tf_utils.is_symbolic_tensor(CustomClass()))
Example #3
  def __new__(mcs, name, bases, attrs):
    attrs.update(
        (attr, _wrap_method(tf.Tensor, attr))
        for attr in tf.Tensor.OVERLOADABLE_OPERATORS.union({'__iter__'}))
    attrs.update(
        (attr, getattr(tf.Tensor, attr))
        for attr in {'__nonzero__', '__bool__', '__array_priority__'})
    cls = super(TensorMetaClass, mcs).__new__(mcs, name, bases, attrs)
    tf.register_tensor_conversion_function(cls, conversion_func=_tensorize)
    return cls
Example #4
  """Defer an operator overload to `tf.Tensor`.

  We pull the operator out of tf.Tensor dynamically to avoid ordering issues.

  Args:
    cls: Class to overload operator.
    op: Python string representing the operator name.
  """
  @functools.wraps(getattr(tf.Tensor, op))
  def _run_op(a, *args):
    return getattr(tf.Tensor, op)(a.value, *args)

  setattr(cls, op, _run_op)


def _tensor_conversion_function(v, dtype=None, name=None, as_ref=False):
  del name, as_ref  # unused
  if dtype and not dtype.is_compatible_with(v.dtype):
    raise ValueError(
        "Incompatible type conversion requested to type '%s' for variable "
        "of type '%s'" % (dtype.name, v.dtype.name))
  return v.value


for operator in tf.Tensor.OVERLOADABLE_OPERATORS.difference(
    {"__getitem__"}).union({"__iter__", "__bool__", "__nonzero__"}):
  _overload_operator(RandomVariable, operator)

tf.register_tensor_conversion_function(  # enable tf.convert_to_tensor
    RandomVariable, _tensor_conversion_function)
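
A toy version of this operator-forwarding pattern (the `Wrapped` class and `_overload` helper are hypothetical stand-ins for `RandomVariable` and `_overload_operator`) makes the mechanics visible: each dunder is fetched from `tf.Tensor` at call time and applied to the wrapped `.value`, and the registered conversion function lets the right-hand operand be unwrapped too:

import functools

import tensorflow as tf


class Wrapped:
  """Hypothetical value holder standing in for RandomVariable."""

  def __init__(self, value):
    self.value = tf.convert_to_tensor(value)


def _overload(cls, op):
  # Resolve the tf.Tensor operator lazily, mirroring the snippet above.
  @functools.wraps(getattr(tf.Tensor, op))
  def _run_op(a, *args):
    return getattr(tf.Tensor, op)(a.value, *args)
  setattr(cls, op, _run_op)


for op in ('__add__', '__mul__', '__neg__'):
  _overload(Wrapped, op)

tf.register_tensor_conversion_function(
    Wrapped, lambda x, *args, **kwargs: x.value)

print(Wrapped(2.) + Wrapped(3.))  # => tf.Tensor(5.0, ...)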
Example #5
    @property
    def shape(self):
        return (self.n_devices, ) + self.tensors[0]._shape_tuple()  # pylint: disable=protected-access

    @property
    def dtype(self):
        return self.tensors[0].dtype


def convert_sharded_tensor_to_eager_tensor(value, *args, **kwargs):
    del args, kwargs
    # TODO(nareshmodi): Consider a collective op to gather the tensors from the
    # various devices for performance reasons.
    return tf.stack(value.tensors)


tf.register_tensor_conversion_function(ShardedNdArray,
                                       convert_sharded_tensor_to_eager_tensor)


class _PmapConfig(threading.local):
    """Simple config used to maintain state related to a current pmap call."""
    def __init__(self):
        super(_PmapConfig, self).__init__()
        self._axis_name = None
        self._devices = None

    def axis_name(self):
        return self._axis_name

    def set_axis_name(self, axis_name):
        self._axis_name = axis_name
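
The same idea extends to container types: the conversion function can materialize a composite object into a single tensor. A minimal sketch in the spirit of `ShardedNdArray` (the `TensorList` class below is a hypothetical stand-in with no real device placement):

import tensorflow as tf


class TensorList:
  """Hypothetical container of same-shaped tensors, one per 'device'."""

  def __init__(self, tensors):
    self.tensors = list(tensors)


def _to_stacked_tensor(value, *args, **kwargs):
  del args, kwargs  # unused
  # Stack the per-shard tensors along a new leading axis.
  return tf.stack(value.tensors)


tf.register_tensor_conversion_function(TensorList, _to_stacked_tensor)

x = TensorList([tf.ones([2]), tf.zeros([2])])
print(tf.convert_to_tensor(x).shape)  # => (2, 2)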
Example #6
            self.assertIs(type(kernel), type(new_kernel))
            if 'store_parameters_in_results' in new_kernel.parameters:
                self.assertTrue(
                    new_kernel.parameters['store_parameters_in_results'])

    def testNoParameters(self):
        kernel = FakeInnerNoParameters()
        new_kernel = util.enable_store_parameters_in_results(kernel)
        self.assertIs(kernel, new_kernel)


class TensorConvertible(object):
    pass


tf.register_tensor_conversion_function(
    TensorConvertible, conversion_func=lambda *args: tf.constant(0))


class SimpleTensorWarningTest(tfp_test_util.TestCase):

    # We must defer creating the TF objects until the body of the test.
    # pylint: disable=unnecessary-lambda
    @parameterized.parameters([lambda: tf.Variable(0)],
                              [lambda: tf.Variable(0)],
                              [lambda: TensorConvertible()])
    def testWarn(self, tensor_callable):
        tensor = tensor_callable()
        warnings.simplefilter('always')
        with warnings.catch_warnings(record=True) as triggered:
            util.warn_if_parameters_are_not_simple_tensors({'a': tensor})
        self.assertTrue(
Example #7
class MyTuple(object):
    """Pretend user-side class for `ConvertToCompositeTensorTest ."""
    def __init__(self, sequence):
        self._sequence = tuple(sequence)

    def __getitem__(self, key):
        return self._sequence[key]

    def __len__(self):
        return len(self._sequence)

    def __iter__(self):
        return iter(self._sequence)


tf.register_tensor_conversion_function(
    MyTuple, conversion_func=lambda x, *_, **__: tensor_tuple.TensorTuple(x))


@test_util.test_all_tf_execution_regimes
class CustomConvertToCompositeTensorTest(test_util.TestCase):
    def test_iter(self):
        x = MyTuple((1, [2., 3.], [[4, 5], [6, 7]]))
        y = ops.convert_to_tensor_or_composite(value=x)
        # TODO(jsimsa): The behavior of `is_tensor` for composite tensors have
        # changed (from True to False) and this check needs to be disabled so that
        # both TAP presubmits (running at HEAD) and Kokoro presubmit (using TF
        # nightly) pass. Re-enable this check when TF nightly picks up this change.
        # self.assertTrue(tf.is_tensor(y))
        self.assertIsInstance(y, tensor_tuple.TensorTuple)
        self.assertLen(y, 3)
        for x_, y_ in zip(x, y):
Example #8
  def test_enables_nontensor_plumbing(self):
    if tf.executing_eagerly():
      self.skipTest('`compile` functionality changed.')
    # Setup.

    class Foo:

      def __init__(self, input_):
        self._input = input_
        self.value = tf.convert_to_tensor([[42.]])

      @property
      def dtype(self):
        return self.value.dtype

    tf.register_tensor_conversion_function(
        Foo, lambda x, *args, **kwargs: x.value)
    tf_utils.register_symbolic_tensor_type(Foo)

    class PlumbingLayer(keras.layers.Lambda):

      def __init__(self, fn, **kwargs):
        def _fn(*fargs, **fkwargs):
          d = fn(*fargs, **fkwargs)
          x = tf.convert_to_tensor(d)
          d.shape = x.shape
          d.get_shape = x.get_shape
          return d, x
        super(PlumbingLayer, self).__init__(_fn, **kwargs)
        self._enter_dunder_call = False

      def __call__(self, inputs, *args, **kwargs):
        self._enter_dunder_call = True
        d, _ = super(PlumbingLayer, self).__call__(inputs, *args, **kwargs)
        self._enter_dunder_call = False
        return d

      def call(self, inputs, *args, **kwargs):
        d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs)
        if self._enter_dunder_call:
          return d, v
        return d

    # User-land.
    model = keras.Sequential([
        keras.layers.InputLayer((1,)),
        PlumbingLayer(Foo),  # Makes a `Foo` object.
    ])
    # Let's ensure Keras graph history is preserved by composing the models.
    model = keras.Model(model.inputs, model(model.outputs))
    # Now we instantiate the model and verify we have a `Foo` object, not a
    # `Tensor`.
    y = model(tf.convert_to_tensor([[7.]]))
    self.assertIsInstance(y, Foo)
    # Confirm that (custom) loss sees `Foo` instance, not Tensor.
    obtained_prediction_box = [None]
    def custom_loss(y_obs, y_pred):
      del y_obs
      obtained_prediction_box[0] = y_pred
      return y_pred
    # Apparently `compile` calls the loss function enough to trigger the
    # side-effect.
    model.compile('SGD', loss=custom_loss)
    self.assertIsInstance(obtained_prediction_box[0], Foo)
Example #9
  def __init__(self, x, name=None):
    super(FakeModule, self).__init__(name)
    self.x = tensor_util.convert_nonref_to_tensor(x)

  @property
  def dtype(self):
    return tf.as_dtype(self.x.dtype)

  @property
  def shape(self):
    return tf.TensorShape(self.x.shape)


tf.register_tensor_conversion_function(
    base_type=FakeModule,
    conversion_func=lambda d, *_, **__: tf.convert_to_tensor(d.x))


@test_util.test_all_tf_execution_regimes
class ConvertNonrefToTensorTest(test_util.TestCase):

  def test_np_object(self):
    x = np.array(0.)
    y = tensor_util.convert_nonref_to_tensor(x)
    self.assertIsInstance(y, tf.Tensor)
    self.assertEqual(x, self.evaluate(y))

  def test_tf_tensor(self):
    x = tf.constant(1.)
    y = tensor_util.convert_nonref_to_tensor(x)
Example #10
  def __matmul__(self, o):
    try:
      return self.read_value().__matmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented

  def __rmatmul__(self, o):
    try:
      return self.read_value().__rmatmul__(o)
    except AttributeError:
      # See https://docs.python.org/3/library/constants.html#NotImplemented
      return NotImplemented

  # pylint: enable=multiple-statements


tf.register_tensor_conversion_function(AutoCastVariable,
                                        AutoCastVariable._dense_var_to_tensor)  # pylint:disable=protected-access


def create_autocast_variable(variable):
  """Creates an AutoCastVariable that wraps another variable.

  This typically just returns `AutoCastVariable(variable)`. But, if the variable
  is a DistributedVariable or one of its subclasses, we instead dynamically
  create a class that subclasses from both AutoCastVariable and
  variable.__class__. This is so the returned variable will still pass
  `isinstance(variable, variable.__class__)`, which is required for
  DistributedVariables and its subclasses to work properly.

  Args:
    variable: A floating-point resource variable to wrap.
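
The docstring above describes building a class on the fly so that the wrapper also passes isinstance checks against the wrapped variable's own class. A pure-Python sketch of that trick (all names here are toy stand-ins, with no TF types involved):

class Wrapper:
  """Toy stand-in for AutoCastVariable."""

  def __init__(self, inner):
    self._inner = inner


def create_wrapper(obj):
  # Derive from both Wrapper and obj's own class, so the result still
  # satisfies isinstance(result, type(obj)).
  cls = type('Wrapped' + type(obj).__name__, (Wrapper, type(obj)), {})
  return cls(obj)


class Distributed:
  pass


w = create_wrapper(Distributed())
assert isinstance(w, Wrapper) and isinstance(w, Distributed)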
Example #11
    def __matmul__(self, o):
        try:
            return self.read_value().__matmul__(o)
        except AttributeError:
            # See
            # https://docs.python.org/3/library/constants.html#NotImplemented
            return NotImplemented

    def __rmatmul__(self, o):
        try:
            return self.read_value().__rmatmul__(o)
        except AttributeError:
            # See
            # https://docs.python.org/3/library/constants.html#NotImplemented
            return NotImplemented


tf.register_tensor_conversion_function(AutoCastVariable,
                                       AutoCastVariable._dense_var_to_tensor)


def create_autocast_variable(variable):
    """Creates an AutoCastVariable that wraps another variable.

    This typically just returns `AutoCastVariable(variable)`. But, if the
    variable is a DistributedVariable or one of its subclasses, we instead
    dynamically create a class that subclasses from both AutoCastVariable and
    variable.__class__. This is so the returned variable will still pass
    `isinstance(variable, variable.__class__)`, which is required for
    DistributedVariables and its subclasses to work properly.

    Args:
      variable: A floating-point resource variable to wrap.
Example #12
    @property
    def shape(self):
        return self.data.shape

    @property
    def dtype(self):
        return self.data.dtype


def fail_on_convert(x, **kwargs):
    _ = x
    _ = kwargs
    raise TypeError('Cannot convert DummyArrayLike to a tensor')


tf.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)


class DataAdapterTestBase(keras_parameterized.TestCase):
    def setUp(self):
        super(DataAdapterTestBase, self).setUp()
        self.batch_size = 5
        self.numpy_input = np.zeros((50, 10))
        self.numpy_target = np.ones(50)
        self.tensor_input = tf.constant(2.0, shape=(50, 10))
        self.tensor_target = tf.ones((50, ))
        self.arraylike_input = DummyArrayLike(self.numpy_input)
        self.arraylike_target = DummyArrayLike(self.numpy_target)
        self.dataset_input = tf.data.Dataset.from_tensor_slices(
            (self.numpy_input,
             self.numpy_target)).shuffle(50).batch(self.batch_size)
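
As a closing note, the `fail_on_convert` pattern above also works as a standalone guard when a type should never be silently converted. A minimal sketch (the `OpaqueArray` class is hypothetical):

import numpy as np
import tensorflow as tf


class OpaqueArray:
  """Hypothetical array-like that must never be auto-converted."""

  def __init__(self, data):
    self.data = np.asarray(data)


def _refuse(value, dtype=None, name=None, as_ref=False):
  del value, dtype, name, as_ref  # unused
  raise TypeError('Cannot convert OpaqueArray to a tensor')


tf.register_tensor_conversion_function(OpaqueArray, _refuse)

try:
  tf.convert_to_tensor(OpaqueArray([1., 2.]))
except TypeError as e:
  print(e)  # the registered function blocked implicit conversion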