Example #1
  def test_works_with_registered(self):

    class CustomClass(object):

      def value(self):
        return ops.convert_to_tensor(42.)

    ops.register_tensor_conversion_function(
        CustomClass, lambda value, **_: value.value())

    tf_utils.register_symbolic_tensor_type(CustomClass)

    if context.executing_eagerly():
      self.assertFalse(tf_utils.is_symbolic_tensor(
          variables.Variable(name='blah', initial_value=0.)))
      self.assertFalse(tf_utils.is_symbolic_tensor(
          ops.convert_to_tensor(0.)))
      self.assertFalse(tf_utils.is_symbolic_tensor(
          sparse_tensor.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
      self.assertFalse(tf_utils.is_symbolic_tensor(CustomClass()))
    else:
      self.assertTrue(tf_utils.is_symbolic_tensor(
          variables.Variable(name='blah', initial_value=0.)))
      self.assertTrue(tf_utils.is_symbolic_tensor(
          ops.convert_to_tensor(0.)))
      self.assertTrue(tf_utils.is_symbolic_tensor(
          sparse_tensor.SparseTensor(
              indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
      self.assertTrue(tf_utils.is_symbolic_tensor(CustomClass()))
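
A minimal standalone sketch of the registration pattern this test exercises,
written against the public TF2 alias `tf.register_tensor_conversion_function`;
the class `Scalar` and its field are illustrative assumptions, not part of the
example above:

import tensorflow as tf

class Scalar(object):
  """Illustrative wrapper type (hypothetical)."""

  def __init__(self, x):
    self._x = x

def _scalar_to_tensor(value, dtype=None, name=None, as_ref=False):
  # convert_to_tensor calls this whenever a Scalar appears where a Tensor is
  # expected; (dtype, name, as_ref) is the required signature.
  del name, as_ref  # Unused in this sketch.
  return tf.constant(value._x, dtype=dtype)

tf.register_tensor_conversion_function(Scalar, _scalar_to_tensor)
t = tf.convert_to_tensor(Scalar(40.0))
print(t + 2.0)  # tf.Tensor(42.0, shape=(), dtype=float32)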
Example #2
  def test_enables_nontensor_plumbing(self):
    # Setup.

    class Foo(object):

      def __init__(self, input_):
        self._input = input_
        self.value = ops.convert_to_tensor(42.)

    ops.register_tensor_conversion_function(
        Foo, lambda x, *args, **kwargs: x.value)
    tf_utils.register_symbolic_tensor_type(Foo)

    class PlumbingLayer(keras.layers.Lambda):

      def __init__(self, fn, **kwargs):
        def _fn(*fargs, **fkwargs):
          d = fn(*fargs, **fkwargs)
          x = ops.convert_to_tensor(d)
          d.shape = x.shape
          d.get_shape = x.get_shape
          return d, x
        super(PlumbingLayer, self).__init__(_fn, **kwargs)
        self._enter_dunder_call = False

      def __call__(self, inputs, *args, **kwargs):
        self._enter_dunder_call = True
        d, _ = super(PlumbingLayer, self).__call__(inputs, *args, **kwargs)
        self._enter_dunder_call = False
        return d

      def call(self, inputs, *args, **kwargs):
        d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs)
        if self._enter_dunder_call:
          return d, v
        return d

    # User-land.
    model = keras.Sequential([
        keras.layers.InputLayer([]),
        PlumbingLayer(Foo),  # Makes a `Foo` object.
    ])
    # Let's ensure Keras graph history is preserved by composing the models.
    model = keras.Model(model.inputs, model(model.outputs))
    # Now we instantiate the model and verify we have a `Foo` object, not a
    # `Tensor`.
    y = model(ops.convert_to_tensor(7.))
    self.assertIsInstance(y, Foo)
Example #3
    def testFullDelegationControlUsingRegistry(self):
        class NumpyArraySubclass(np.ndarray):
            def __radd__(self, lhs):
                return "Works!"

        def raise_to_delegate(value, dtype=None, name=None, as_ref=False):
            del value, dtype, name, as_ref  # Unused.
            raise TypeError

        ops.register_tensor_conversion_function(NumpyArraySubclass,
                                                raise_to_delegate,
                                                priority=0)
        tensor = ops.convert_to_tensor([[10.0, 20.0]])
        rhs = NumpyArraySubclass(shape=(1, 2), buffer=np.array([1.0, 2.0]))
        res = tensor + rhs
        self.assertEqual(res, "Works!")
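
The mechanics behind this test, sketched with public TF2 names (`Delegating`
is a hypothetical stand-in): because the registered function raises
`TypeError`, the Tensor's `__add__` cannot convert its right-hand operand and
returns `NotImplemented`, so Python dispatches to the operand's `__radd__`:

import numpy as np
import tensorflow as tf

class Delegating(np.ndarray):
    def __radd__(self, lhs):
        return "delegated"

def _refuse(value, dtype=None, name=None, as_ref=False):
    del value, dtype, name, as_ref  # Unused.
    raise TypeError  # Makes convert_to_tensor give up on this type.

# priority=0 runs before the generic ndarray conversion (priority 100).
tf.register_tensor_conversion_function(Delegating, _refuse, priority=0)

rhs = Delegating(shape=(2,), buffer=np.array([1.0, 2.0]))
print(tf.constant([1.0, 2.0]) + rhs)  # "delegated"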
Example #4
def register_tensor_conversion(convertable,
                               name=None,
                               overload_operators=True,
                               priority=10):  # lower number = runs first, ahead of TF's built-in conversions
    def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
        return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)

    ops.register_tensor_conversion_function(convertable,
                                            _dense_var_to_tensor,
                                            priority=priority)
    if name:
        pass
        # _pywrap_utils.RegisterType(name, convertable)

    if overload_operators:
        convertable._OverloadAllOperators()
Example #5
  def testFullDelegationControlUsingRegistry(self):

    class NumpyArraySubclass(np.ndarray):

      def __radd__(self, lhs):
        return "Works!"

    def raise_to_delegate(value, dtype=None, name=None, as_ref=False):
      del value, dtype, name, as_ref  # Unused.
      raise TypeError

    ops.register_tensor_conversion_function(
        NumpyArraySubclass, raise_to_delegate, priority=0)
    tensor = ops.convert_to_tensor([[10.0, 20.0]])
    rhs = NumpyArraySubclass(shape=(1, 2), buffer=np.array([1.0, 2.0]))
    res = tensor + rhs
    self.assertEqual(res, "Works!")
Example #6
    def test_works_with_registered(self):
        class CustomClass(object):
            def value(self):
                return ops.convert_to_tensor_v2_with_dispatch(42.)

        ops.register_tensor_conversion_function(
            CustomClass, lambda value, **_: value.value())

        tf_utils.register_symbolic_tensor_type(CustomClass)

        if context.executing_eagerly():
            self.assertFalse(
                tf_utils.is_symbolic_tensor(
                    variables.Variable(name='blah', initial_value=0.)))
            self.assertFalse(
                tf_utils.is_symbolic_tensor(
                    ops.convert_to_tensor_v2_with_dispatch(0.)))
            self.assertFalse(
                tf_utils.is_symbolic_tensor(
                    sparse_tensor.SparseTensor(indices=[[0, 0], [1, 2]],
                                               values=[1, 2],
                                               dense_shape=[3, 4])))
            self.assertFalse(tf_utils.is_symbolic_tensor(CustomClass()))
        else:
            self.assertTrue(
                tf_utils.is_symbolic_tensor(
                    variables.Variable(name='blah', initial_value=0.)))
            self.assertTrue(
                tf_utils.is_symbolic_tensor(
                    ops.convert_to_tensor_v2_with_dispatch(0.)))
            self.assertTrue(
                tf_utils.is_symbolic_tensor(
                    sparse_tensor.SparseTensor(indices=[[0, 0], [1, 2]],
                                               values=[1, 2],
                                               dense_shape=[3, 4])))
            self.assertTrue(tf_utils.is_symbolic_tensor(CustomClass()))
Example #7
    return self._read_variable_op()

  def _read_variable_op(self):
    with ops.control_dependencies([self._parent_op]):
      return gen_resource_variable_ops.read_variable_op(self._handle,
                                                        self._dtype)

  def set_shape(self, shape):
    self._shape = shape

  @property
  def op(self):
    """The op for this variable."""
    return self._parent_op

ops.register_tensor_conversion_function(_UnreadVariable, _dense_var_to_tensor)
ops.register_dense_tensor_like_type(_UnreadVariable)

# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.

# Note: registering for Variable after ResourceVariable because inheritance will
# otherwise lead to the wrong behavior.
ops.register_tensor_conversion_function(ResourceVariable, _dense_var_to_tensor)
ops.register_tensor_conversion_function(
    variables.Variable, variables.Variable._TensorConversionFunction)  # pylint: disable=protected-access

# pylint: disable=protected-access
ResourceVariable._OverloadAllOperators()
ops.register_dense_tensor_like_type(ResourceVariable)
Example #8
     collections: any collections in which this operation should be inserted.
     trainable: whether this read is to be used for training.

    Returns:
     the read operation.
    """
    with ops.name_scope("Read"):
      value = gen_resource_variable_ops.read_variable_op(
          self._handle, dtype=self._dtype)
    _register_variable_read(value, collections=collections, trainable=trainable)
    return value

  def sparse_read(self, indices, collections=None, trainable=True, name=None):
    with ops.name_scope("Gather" if name is None else name):
      value = gen_resource_variable_ops.resource_gather(
          self._handle, indices, dtype=self._dtype)
    _register_variable_read(value, collections=collections, trainable=trainable)
    return value


# pylint: disable=unused-argument
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
  if dtype is not None and dtype != var.value.dtype:
    print("trying to switch the dtype to ", dtype, " from ", var.value.dtype)
    return NotImplemented
  return var.value

# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
ops.register_tensor_conversion_function(ResourceVariable, _dense_var_to_tensor)
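
Returning `NotImplemented`, as `_dense_var_to_tensor` does above on a dtype
mismatch, tells `convert_to_tensor` to fall through to the next registered
conversion function instead of failing outright. A hedged sketch with public
TF2 names (`Fixed` is hypothetical):

import tensorflow as tf

class Fixed(object):
  """Hypothetical type whose value is always float32."""
  value = tf.constant(1.0)

def _strict(value, dtype=None, name=None, as_ref=False):
  del name, as_ref  # Unused.
  if dtype is not None and dtype != value.value.dtype:
    return NotImplemented  # Decline; lower-precedence converters may try.
  return value.value

tf.register_tensor_conversion_function(Fixed, _strict)
print(tf.convert_to_tensor(Fixed()))  # tf.Tensor(1.0, ...), via _strict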
Example #9
    @property
    def shape(self):
        return self.data.shape

    @property
    def dtype(self):
        return self.data.dtype


def fail_on_convert(x, **kwargs):
    _ = x
    _ = kwargs
    raise TypeError('Cannot convert DummyArrayLike to a tensor')


ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)


class DataAdapterTestBase(keras_parameterized.TestCase):
    def setUp(self):
        super(DataAdapterTestBase, self).setUp()
        self.batch_size = 5
        self.numpy_input = np.zeros((50, 10))
        self.numpy_target = np.ones(50)
        self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
        self.tensor_target = array_ops.ones((50, ))
        self.arraylike_input = DummyArrayLike(self.numpy_input)
        self.arraylike_target = DummyArrayLike(self.numpy_target)
        self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
            (self.numpy_input,
             self.numpy_target)).shuffle(50).batch(self.batch_size)
Example #10
        if not isinstance(self.data, ops.EagerTensor):
            raise TypeError('Indexing using symbolic tensor is not allowed')
        return self.data.numpy().item()

    def tolist(self):
        return self.data.numpy().tolist()

    def __str__(self):
        return 'ndarray<{}>'.format(self.data.__str__())

    def __repr__(self):
        return 'ndarray<{}>'.format(self.data.__repr__())


def tensor_to_ndarray(tensor):
    return ndarray.from_tensor(tensor)


def ndarray_to_tensor(arr, dtype=None, name=None, as_ref=False):
    if as_ref:
        raise ValueError('as_ref is not supported.')
    if dtype and dtypes.as_dtype(arr.dtype) != dtype:
        return math_ops.cast(arr.data, dtype)
    result_t = arr.data
    if name:
        result_t = array_ops.identity(result_t, name=name)
    return result_t


ops.register_tensor_conversion_function(ndarray, ndarray_to_tensor)
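
The dtype/name handling above follows the usual contract: cast when a
different dtype is requested, otherwise attach the requested name with an
identity op. A self-contained sketch under those assumptions, using public
TF2 ops (`ArrayBox` is hypothetical):

import numpy as np
import tensorflow as tf

class ArrayBox(object):
    """Hypothetical array-like wrapper standing in for ndarray above."""
    def __init__(self, data):
        self.data = tf.constant(data)
        self.dtype = self.data.dtype

def _box_to_tensor(box, dtype=None, name=None, as_ref=False):
    if as_ref:
        raise ValueError('as_ref is not supported.')
    if dtype is not None and box.dtype != dtype:
        return tf.cast(box.data, dtype)  # requested-dtype path
    if name is not None:
        return tf.identity(box.data, name=name)  # requested-name path
    return box.data

tf.register_tensor_conversion_function(ArrayBox, _box_to_tensor)
print(tf.convert_to_tensor(ArrayBox(np.arange(3.0)), dtype=tf.float32))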
Example #11
  def _map_resources(self, save_options):
    """For implementing `Trackable`."""
    # By delegating this method to the wrapped variable, SavedModel with
    # AggregatingVariable are identical to SavedModel with normal variables.
    obj_map, resource_map = self._v._map_resources(save_options)  # pylint:disable=protected-access
    obj_map[self] = obj_map[self._v]
    return obj_map, resource_map


# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_aggregate(var, dtype=None, name=None, as_ref=False):
  return var._dense_var_to_tensor(dtype, name, as_ref)  # pylint: disable=protected-access


ops.register_tensor_conversion_function(AggregatingVariable,
                                        _tensor_conversion_aggregate)


# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_caching(var, dtype=None, name=None, as_ref=False):
  return var._dense_var_to_tensor(dtype, name, as_ref)  # pylint: disable=protected-access


ops.register_tensor_conversion_function(CachingVariable,
                                        _tensor_conversion_caching)

CachingVariable._overload_overloadable_operators()  # pylint: disable=protected-access


class DistributedTable(lookup_ops.StaticHashTable):
Example #12
  dense_shape_value = tensor_util.constant_value(value.dense_shape)
  if dense_shape_value is not None:
    num_elements = np.prod(dense_shape_value)
    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
      warnings.warn(
          "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
          "This may consume a large amount of memory." % num_elements)
  else:
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
        "This may consume a large amount of memory.")
  return math_ops.unsorted_segment_sum(
      value.values, value.indices, value.dense_shape[0], name=name)


ops.register_tensor_conversion_function(ops.IndexedSlices,
                                        _IndexedSlicesToTensor)


def _MarkReachedOps(from_ops, reached_ops):
  """Mark all ops reached from "from_ops".

  Args:
    from_ops: list of Operations.
    reached_ops: list of booleans, indexed by operation id.
  """
  queue = collections.deque()
  queue.extend(from_ops)
  while queue:
    op = queue.popleft()
    if not reached_ops[op._id]:
      reached_ops[op._id] = True
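
The densification itself is easy to reproduce in isolation:
`unsorted_segment_sum` scatter-adds each slice row into a dense tensor whose
leading dimension is `dense_shape[0]`. A sketch against the public TF2 API:

import tensorflow as tf

slices = tf.IndexedSlices(
    values=tf.constant([[1., 2.], [3., 4.]]),
    indices=tf.constant([0, 2]),
    dense_shape=tf.constant([4, 2]))

dense = tf.math.unsorted_segment_sum(
    slices.values, slices.indices, slices.dense_shape[0])
print(dense)  # Rows 0 and 2 hold the slice values; rows 1 and 3 are zero.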
Example #13
            return self.read_value().__matmul__(o)
        except AttributeError:
            # See https://docs.python.org/3/library/constants.html#NotImplemented
            return NotImplemented

    def __rmatmul__(self, o):
        try:
            return self.read_value().__rmatmul__(o)
        except AttributeError:
            # See https://docs.python.org/3/library/constants.html#NotImplemented
            return NotImplemented

    # pylint: enable=multiple-statements


ops.register_tensor_conversion_function(AutoCastVariable,
                                        AutoCastVariable._dense_var_to_tensor)  # pylint:disable=protected-access


def create_autocast_variable(variable):
    """Creates an AutoCastVariable that wraps another variable.

    This typically just returns `AutoCastVariable(variable)`. But, if the variable
    is a DistributedVariable or one of its subclasses, we instead dynamically
    create a class that subclasses from both AutoCastVariable and
    variable.__class__. This is so the returned variable will still pass
    `isinstance(variable, variable.__class__)`, which is required for
    DistributedVariables and their subclasses to work properly.

    Args:
    variable: A floating-point resource variable to wrap.
Example #14
    def test_enables_nontensor_plumbing(self):
        # Setup.

        class Foo(object):
            def __init__(self, input_):
                self._input = input_
                self.value = ops.convert_to_tensor([[42.]])

            @property
            def dtype(self):
                return self.value.dtype

        ops.register_tensor_conversion_function(
            Foo, lambda x, *args, **kwargs: x.value)
        tf_utils.register_symbolic_tensor_type(Foo)

        class PlumbingLayer(keras.layers.Lambda):
            def __init__(self, fn, **kwargs):
                def _fn(*fargs, **fkwargs):
                    d = fn(*fargs, **fkwargs)
                    x = ops.convert_to_tensor(d)
                    d.shape = x.shape
                    d.get_shape = x.get_shape
                    return d, x

                super(PlumbingLayer, self).__init__(_fn, **kwargs)
                self._enter_dunder_call = False

            def __call__(self, inputs, *args, **kwargs):
                self._enter_dunder_call = True
                d, _ = super(PlumbingLayer,
                             self).__call__(inputs, *args, **kwargs)
                self._enter_dunder_call = False
                return d

            def call(self, inputs, *args, **kwargs):
                d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs)
                if self._enter_dunder_call:
                    return d, v
                return d

        # User-land.
        model = keras.Sequential([
            keras.layers.InputLayer((1, )),
            PlumbingLayer(Foo),  # Makes a `Foo` object.
        ])
        # Let's ensure Keras graph history is preserved by composing the models.
        model = keras.Model(model.inputs, model(model.outputs))
        # Now we instantiate the model and verify we have a `Foo` object, not a
        # `Tensor`.
        y = model(ops.convert_to_tensor([[7.]]))
        self.assertIsInstance(y, Foo)
        # Confirm that (custom) loss sees `Foo` instance, not Tensor.
        obtained_prediction_box = [None]

        def custom_loss(y_obs, y_pred):
            del y_obs
            obtained_prediction_box[0] = y_pred
            return y_pred

        # Apparently `compile` calls the loss function enough to trigger the
        # side-effect.
        model.compile('SGD', loss=custom_loss)
        self.assertIsInstance(obtained_prediction_box[0], Foo)
Example #15
                    self.sess.run(self.n_samples.initializer)
            if n:
                if not isinstance(self.n_samples, tf.Variable):
                    raise RuntimeError(
                        "Cannot set a new `n` if no Tensor-like object was given"
                    )
                self.n_samples.load(value=n, session=self.sess)
            self.sess.run(self.sample_holder.initializer)
            self._initial_resampled = True


def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
    return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)


ops.register_tensor_conversion_function(Data, _dense_var_to_tensor)
fetch_function = lambda data: ([data.value()], lambda val: val[0])
feed_function = lambda data, feed_val: [(data.value(), feed_val)]
feed_function_for_partial_run = lambda data: [data.value()]

from tensorflow.python.client.session import register_session_run_conversion_functions

# ops.register_dense_tensor_like_type()

register_session_run_conversion_functions(
    tensor_type=Data,
    fetch_function=fetch_function,
    feed_function=feed_function,
    feed_function_for_partial_run=feed_function_for_partial_run)

Data._OverloadAllOperators()
Example #16
      return self.read_value()


# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion(var, dtype=None, name=None, as_ref=False):
  return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access


def replicated_fetch_function(var):
  # pylint: disable=protected-access
  return ([var._dense_var_to_tensor()], lambda v: v[0])
  # pylint: enable=protected-access


ops.register_tensor_conversion_function(ReplicatedVariable, _tensor_conversion)
ops.register_dense_tensor_like_type(ReplicatedVariable)
session_lib.register_session_run_conversion_functions(
    ReplicatedVariable, replicated_fetch_function)


def replicated_scope(num_replicas):
  """Variable scope for constructing replicated variables."""

  def _replicated_variable_getter(getter, name, *args, **kwargs):
    """Getter that constructs replicated variables."""
    collections = kwargs.pop("collections", None)
    if collections is None:
      collections = [ops.GraphKeys.GLOBAL_VARIABLES]
    kwargs["collections"] = []
Example #17
    def _saveable_factory(name=self._common_name):
      return _MirroredSaveable(self, self._primary_var, name)
    return {checkpointable.VARIABLE_VALUE_KEY: _saveable_factory}


# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
  # Try to avoid assignments to and other mutations of MirroredVariable
  # state except through a DistributionStrategy.update() call.
  assert not as_ref
  return ops.internal_convert_to_tensor(
      var.get(), dtype=dtype, name=name, as_ref=as_ref)


ops.register_tensor_conversion_function(MirroredVariable,
                                        _tensor_conversion_mirrored)


class _TowerLocalSaveable(saver.BaseSaverBuilder.SaveableObject):
  """Class for defining how to restore a TowerLocalVariable."""

  def __init__(self, tower_local_variable, name):
    self._tower_local_variable = tower_local_variable
    # We use a callable so that we don't have to evaluate this expression
    # in the case where we are trying to restore instead of save.
    def tensor():
      return distribute_lib.get_distribution_strategy().read_var(
          tower_local_variable)
    spec = saver.BaseSaverBuilder.SaveSpec(
        tensor=tensor,
        slice_spec="",
Example #18
  captured_value = tensor_map.get(ops.tensor_id(value), None)
  if captured_value is None:
    captured_value = graph_placeholder(
        dtype=dtype or value.dtype, shape=value.shape, name=name)
    if captured_value.dtype == dtypes.resource:
      captured_value._handle_data = value._handle_data  # pylint: disable=protected-access
    tensor_map[ops.tensor_id(value)] = (value, captured_value)
  else:
    captured_value = captured_value[1]
  return captured_value


# TODO(apassos): it'd be really nice if we could scope this registration.
# Note that we register this at a higher priority than ops.Tensor since we want
# to handle subclass specific conversion before a superclass conversion.
ops.register_tensor_conversion_function(
    tensor.Tensor, _convert_to_graph_constant, priority=-1)


class _CapturingContext(object):
  """Tracks references to Tensors outside this context while it is active."""

  def __init__(self):
    # known_ops are ops which are created while this context is active
    self.known_ops = set()

    # captured_tensors are all tensors referenced to by ops in this context but
    # not produced in it
    self.captured_tensors = set()

  def AddOp(self, op):  # pylint: disable=invalid-name
    if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
Example #19
    return property(Cache().__getitem__)
  ```

  However, that implementation holds class instances as keys, and as a result
  blocks garbage collection. (And modifying it to use weakrefs as keys raises
  the lookup overhead to ~0.4 us) As a result, the WeakKeyDictionary
  implementation below turns out to be more prudent.

  Args:
    f: The function to cache.

  Returns:
    f decorated with simple caching behavior.
  """

    cache = weakref.WeakKeyDictionary()

    @functools.wraps(f)
    def wrapped(item):
        output = cache.get(item)
        if output is None:
            cache[item] = output = f(item)
        return output

    return wrapped


ops.register_tensor_conversion_function(
    TrackableAsset,
    lambda asset, **kw: ops.internal_convert_to_tensor(asset.asset_path, **kw))
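
The registration lambda above forwards all conversion keywords into another
conversion call. When imitating this with the public TF2 API, note that
`tf.convert_to_tensor` itself takes no `as_ref` argument, so a forwarding
converter should drop it explicitly (sketch; `Wrapper` is hypothetical):

import tensorflow as tf

class Wrapper(object):
  def __init__(self, inner):
    self.inner = inner

def _forward(w, dtype=None, name=None, as_ref=False):
  del as_ref  # tf.convert_to_tensor has no as_ref parameter.
  return tf.convert_to_tensor(w.inner, dtype=dtype, name=name)

tf.register_tensor_conversion_function(Wrapper, _forward)
print(tf.convert_to_tensor(Wrapper([1., 2.])))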
Example #20
            container = ""
        return gen_ev_ops.ev_handle_op(shape=shape,
                                       shared_name=shared_name,
                                       name=name,
                                       Tkey=self._ktype,
                                       Tvalue=dtype,
                                       container=container)


# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.

# Note: registering for Variable after EmbeddingVariable because inheritance will
# otherwise lead to the wrong behavior.
#ops.register_tensor_conversion_function(EmbeddingVariable, _dense_var_to_tensor)
ops.register_tensor_conversion_function(
    variables.Variable, variables.Variable._TensorConversionFunction)  # pylint: disable=protected-access


@ops.RegisterGradient("EVGather")
def _GatherGrad(op, grad):
    """Gradient for gather op."""
    handle = op.inputs[0]
    indices = op.inputs[1]
    params_shape = gen_ev_ops.ev_shape(handle,
                                       Tkey=indices.dtype,
                                       Tvalue=grad.dtype)
    size = array_ops.expand_dims(array_ops.size(indices), 0)
    values_shape = array_ops.concat([size, params_shape[1:]], 0)
    values = array_ops.reshape(grad, values_shape)
    indices = array_ops.reshape(indices, size)
    return [ops.IndexedSlices(values, indices, params_shape), None, None]
Example #21
                var_list.append(op.outputs[0])
    if not var_list:
        return None
    else:
        ranks = []
        for var in var_list:
            with ops.device(var.device):
                ranks.append(array_ops.rank(var))
        if len(ranks) == 1:
            return ranks[0]
        else:
            return array_ops.pack(ranks)


# pylint: disable=protected-access
ops.register_tensor_conversion_function(Variable,
                                        Variable._TensorConversionFunction)
Variable._OverloadAllOperators()
# pylint: enable=protected-access

ops.register_proto_function(ops.GraphKeys.VARIABLES,
                            proto_type=variable_pb2.VariableDef,
                            to_proto=Variable.to_proto,
                            from_proto=Variable.from_proto)
ops.register_proto_function(ops.GraphKeys.TRAINABLE_VARIABLES,
                            proto_type=variable_pb2.VariableDef,
                            to_proto=Variable.to_proto,
                            from_proto=Variable.from_proto)
ops.register_proto_function(ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
                            proto_type=variable_pb2.VariableDef,
                            to_proto=Variable.to_proto,
                            from_proto=Variable.from_proto)
Example #22
            delta_get_op = delta_staging_area.get()[0]
        # Return the actual updates. The colocation constraint will be reapplied.
        return self.real_var.assign_sub(delta_get_op, read_value=read_value)

    @staticmethod
    # pylint: disable=bad-staticmethod-argument,invalid-name
    def _TensorConversionFunction(self, dtype=None, name=None, as_ref=False):
        """Utility function for converting a StagedModelVariable to a Tensor."""
        del dtype, name  # unused: this function returns the cached ref or value.
        if as_ref:
            return self._ref()
        else:
            return self._value()


ops.register_tensor_conversion_function(
    StagedModelVariable, StagedModelVariable._TensorConversionFunction)  # pylint: disable=protected-access


class StagedVariableGetter(object):
    """A variable getter through staging buffers on devices.

  Instead of a caching device, this getter tracks where the variable is used.
  And on each device, it goes through a staging buffer.
  """
    def __init__(self, device_num, devices, cpu_device, variable_mgr):
        """Initializer for StagedVariableGetter.

    Args:
      device_num: the current device index.
      devices: a list of all the devices to build towers.
      cpu_device: a cpu_device for this replica. If None, no cpu-caching is
Example #23
        # to fail.
        for i, inp in enumerate(inputs):
            if inp.graph is not self:
                inputs[i] = capture_value(self.captures, inp, inp.dtype,
                                          inp.op.name)
        return super(CapturingGraph,
                     self).create_op(op_type, inputs, dtypes, input_types,
                                     name, attrs, op_def, compute_shapes,
                                     compute_device)


# TODO(apassos): it'd be really nice if we could scope this registration.
# Note that we register this at a higher priority than ops.Tensor since we want
# to handle subclass specific conversion before a superclass conversion.
ops.register_tensor_conversion_function(ops.EagerTensor,
                                        _convert_to_graph_tensor,
                                        priority=-1)


# pylint: disable=invalid-name
class HelperContext(object):
    """ControlFlowContext with a customizable AddOp method."""
    def __init__(self, add_op_internal):
        self._add_op_internal = add_op_internal
        self._values = set()  # control flow code sometimes updates this.

    def _AddOpInternal(self, op):
        self._add_op_internal(op)

    @property
    def outer_context(self):
Example #24
      compute_device=True):
    # TODO(apassos) this should do some form of alias analysis as ops which
    # forward the resources such as Identity and Switch can cause serialization
    # to fail.
    for i, inp in enumerate(inputs):
      if inp.graph is not self:
        inputs[i] = capture_value(self.captures, inp, inp.dtype, inp.op.name)
    return super(CapturingGraph, self).create_op(
        op_type, inputs, dtypes, input_types, name, attrs, op_def,
        compute_shapes, compute_device)


# TODO(apassos): it'd be really nice if we could scope this registration.
# Note that we register this at a higher priority than ops.Tensor since we want
# to handle subclass specific conversion before a superclass conversion.
ops.register_tensor_conversion_function(
    ops.EagerTensor, _convert_to_graph_tensor, priority=-1)


# pylint: disable=invalid-name
class HelperContext(object):
  """ControlFlowContext with a customizable AddOp method."""

  def __init__(self, add_op_internal):
    self._add_op_internal = add_op_internal
    self._values = set()  # control flow code sometimes updates this.

  def _AddOpInternal(self, op):
    self._add_op_internal(op)

  @property
  def outer_context(self):
Example #25
  def test_enables_nontensor_plumbing(self):
    # Setup.

    class Foo(object):

      def __init__(self, input_):
        self._input = input_
        self.value = ops.convert_to_tensor(42.)

      @property
      def dtype(self):
        return self.value.dtype

    ops.register_tensor_conversion_function(
        Foo, lambda x, *args, **kwargs: x.value)
    tf_utils.register_symbolic_tensor_type(Foo)

    class PlumbingLayer(keras.layers.Lambda):

      def __init__(self, fn, **kwargs):
        def _fn(*fargs, **fkwargs):
          d = fn(*fargs, **fkwargs)
          x = ops.convert_to_tensor(d)
          d.shape = x.shape
          d.get_shape = x.get_shape
          return d, x
        super(PlumbingLayer, self).__init__(_fn, **kwargs)
        self._enter_dunder_call = False

      def __call__(self, inputs, *args, **kwargs):
        self._enter_dunder_call = True
        d, _ = super(PlumbingLayer, self).__call__(inputs, *args, **kwargs)
        self._enter_dunder_call = False
        return d

      def call(self, inputs, *args, **kwargs):
        d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs)
        if self._enter_dunder_call:
          return d, v
        return d

    # User-land.
    model = keras.Sequential([
        keras.layers.InputLayer([]),
        PlumbingLayer(Foo),  # Makes a `Foo` object.
    ])
    # Let's ensure Keras graph history is preserved by composing the models.
    model = keras.Model(model.inputs, model(model.outputs))
    # Now we instantiate the model and verify we have a `Foo` object, not a
    # `Tensor`.
    y = model(ops.convert_to_tensor(7.))
    self.assertIsInstance(y, Foo)
    # Confirm that (custom) loss sees `Foo` instance, not Tensor.
    obtained_prediction_box = [None]
    def custom_loss(y_obs, y_pred):
      del y_obs
      obtained_prediction_box[0] = y_pred
      return y_pred
    # Apparently `compile` calls the loss function enough to trigger the
    # side-effect.
    model.compile('SGD', loss=custom_loss)
    self.assertIsInstance(obtained_prediction_box[0], Foo)
Example #26
  def _should_act_as_resource_variable(self):
    """Pass resource_variable_ops.is_resource_variable check."""
    pass


# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion(var, dtype=None, name=None, as_ref=False):
  # Try to avoid assignments to and other mutations of MirroredVariable
  # state except through a DistributionStrategy.update() call.
  assert not as_ref
  return ops.internal_convert_to_tensor(
      var.get(), dtype=dtype, name=name, as_ref=as_ref)


ops.register_tensor_conversion_function(DistributedVariable, _tensor_conversion)
ops.register_dense_tensor_like_type(DistributedVariable)


class _MirroredSaveable(saver.BaseSaverBuilder.ResourceVariableSaveable):
  """Class for defining how to restore a MirroredVariable."""

  def __init__(self, mirrored_variable, primary_variable, name):
    self._mirrored_variable = mirrored_variable
    super(_MirroredSaveable, self).__init__(primary_variable, "", name)

  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into all variables."""
    tensor, = restored_tensors
    return control_flow_ops.group([
        _assign_on_device(d, v, tensor)
Example #27
          'of type {!r}'.format(dtype.name, self.dtype.name))
    val = ops.internal_convert_to_tensor(self._variable,
                                         self._variable.dtype, name,
                                         as_ref=False)
    with ops.colocate_with(None, ignore_existing=True):
      with ops.device(val.device):
        return math_ops.cast(val, self.dtype)

  def _should_act_as_resource_variable(self):
    """Pass resource_variable_ops.is_resource_variable check."""
    pass

  # TODO(reedwm): Define operator overloads.


ops.register_tensor_conversion_function(
    AutoCastVariable, AutoCastVariable._dense_var_to_tensor)  # pylint:disable=protected-access
ops.register_dense_tensor_like_type(AutoCastVariable)


# We have DistributedVariable subclass to pass
# isinstance(..., DistributedVariable) checks when wrapping a
# DistributedVariable.
# TODO(reedwm): We should not wrap DistributedVariable, but instead have
# DistributedVariable wrap AutoCastVariable. Subclassing DistributedVariable is
# messy, because we do not fully implement the interface of DistributedVariable.
class AutoCastDistributedVariable(AutoCastVariable,
                                  distribute_values.DistributedVariable):
  """Version of AutoCastVariable that subclasses DistributedVariable."""

  def __init__(self, variable):
    if not isinstance(variable, distribute_values.DistributedValues):
Example #28
  def devices(self):
    return set(tensor.device for tensor in self.tensors)

  def __str__(self):
    return "PartitionedTensor([%s, ...], dtype=%s, shape=%s)" % (
        self.tensors[0].name, self.dtype.name, tuple(self.shape.as_list()))

  def __hash__(self):
    return hash(tuple(self.tensors))

  def as_tensor(self, dtype=None, name=None, as_ref=False):
    with ops.name_scope(name, "PartitionedTensor.as_tensor", self.tensors):
      assert not as_ref
      assert dtype in [None, self.dtype]
      result = array_ops.concat(self.tensors, axis=0)

      # Cache 'result' if we haven't already cached a value for this device.
      if result.device not in self._concats:
        self._concats[result.device] = result
      return self._concats[result.device]


ops.register_tensor_conversion_function(
    PartitionedTensor,
    lambda val, dtype, name, as_ref: val.as_tensor(dtype, name, as_ref))


# TODO(b/69623235): Add a function for finding tensors that share gradients
# to eliminate redundant fisher factor computations.
Example #29
            # Return an empty tensor so we only need to check for returned tensor
            # size being 0 as an indication of model ready.
            return array_ops.constant([], dtype=dtypes.string)
        else:
            # Get a 1-D boolean tensor listing whether each variable is initialized.
            variables_mask = math_ops.logical_not(
                array_ops.pack([state_ops.is_variable_initialized(v) for v in var_list])
            )
            # Get a 1-D string tensor containing all the variable names.
            variable_names_tensor = array_ops.constant([s.op.name for s in var_list])
            # Return a 1-D tensor containing all the names of uninitialized variables.
            return array_ops.boolean_mask(variable_names_tensor, variables_mask)


# pylint: disable=protected-access
ops.register_tensor_conversion_function(Variable, Variable._TensorConversionFunction)
Variable._OverloadAllOperators()

ops.register_tensor_conversion_function(PartitionedVariable, PartitionedVariable._TensorConversionFunction)
# pylint: enable=protected-access

ops.register_dense_tensor_like_type(Variable)
ops.register_proto_function(
    ops.GraphKeys.GLOBAL_VARIABLES,
    proto_type=variable_pb2.VariableDef,
    to_proto=Variable.to_proto,
    from_proto=Variable.from_proto,
)
ops.register_proto_function(
    ops.GraphKeys.TRAINABLE_VARIABLES,
    proto_type=variable_pb2.VariableDef,
Example #30
    # Restore the original kernel_shape
    kernel_shape[2] = numKernelInputChannels

    for layerIndex in xrange(numStackedLayers - 1):
        if use_scope:
            commonScope = scope_name + '_shortcut_layer_' + str(layerIndex)
        blockInput = BatchNormedConvLayer(blockInput, ewma_trainer,
                                          kernel_shape, [1, 1, 1, 1],
                                          kernelInit, biasInitVal, 'SAME',
                                          operator, scale_after_norm, epsilon,
                                          is_train_phase, use_scope,
                                          commonScope)

    Hx = blockInput

    output = Tx * Hx + Cx * Gx

    if gateType == MywayFFLayer.RESIDUAL_GATE:
        # apply nonlinear op here
        # See the TODO above
        output = operator(output)

    return output


# pylint: disable=protected-access
ops.register_tensor_conversion_function(Layer, Layer._TensorConversionFunction)
Layer._OverloadAllOperators()
# pylint: enable=protected-access
Example #31
  ...     return tf.nn.embedding_lookup(self.sharded_variable.variables, x)
  >>>
  >>> model = Model()
  >>> model.fn(1).numpy()
  2.0
  >>> tf.saved_model.save(model, export_dir='/tmp/saved_model',
  ...   signatures=model.serve_fn)
  """
    @property
    def _type_spec(self):
        return ShardedVariableSpec(
            *(resource_variable_ops.VariableSpec(v.shape, v.dtype)
              for v in self._variables))


def _var_to_tensor(var, dtype=None, name=None, as_ref=False):
    del name
    if dtype is not None and not dtype.is_compatible_with(var.dtype):
        raise ValueError(
            'Incompatible type conversion requested to type {!r} for variable '
            'of type {!r}'.format(dtype.name, var.dtype.name))
    if as_ref:
        raise NotImplementedError(
            "ShardedVariable doesn't support being used as a reference.")
    return array_ops.concat(var.variables, axis=0)


# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
ops.register_tensor_conversion_function(ShardedVariable, _var_to_tensor)
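
The conversion above amounts to concatenating the component variables along
the first axis, which can be checked directly (sketch, public TF2 API):

import tensorflow as tf

shards = [tf.Variable([1., 2.]), tf.Variable([3., 4.])]
dense = tf.concat([s.read_value() for s in shards], axis=0)
print(dense)  # [1. 2. 3. 4.], matching _var_to_tensor's concat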
Example #32
    """Pass resource_variable_ops.is_resource_variable check."""
    pass

  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts a variable to a tensor."""
    # pylint: disable=protected-access
    if _enclosing_tpu_context() is None:
      if hasattr(self._primary_var, '_dense_var_to_tensor'):
        return self._primary_var._dense_var_to_tensor(dtype, name, as_ref)
      else:
        return ops.convert_to_tensor(self._primary_var)
    # pylint: enable=protected-access
    if dtype is not None and dtype != self.dtype:
      return NotImplemented
    if as_ref:
      return self.handle
    else:
      return self.read_value()


# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion(var, dtype=None, name=None, as_ref=False):
  return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access


ops.register_tensor_conversion_function(ReplicatedVariable, _tensor_conversion)

if not TF_23:
  ops.register_dense_tensor_like_type(ReplicatedVariable)
Example #33
  """Base class for asset files which need to be tracked."""

  def __init__(self, path):
    """Record the full path to the asset."""
    # We use a variable here so that @tf.functions do not capture a literal
    # value. The init_scope prevents functions from capturing `path` in an
    # initialization graph, since it is transient and should not end up in a
    # serialized function body. When serialized in a SavedModel, the variable
    # will be set during the loading process to its location in the assets/
    # directory.
    with ops.init_scope():
      if context.executing_eagerly():
        self._path = self._no_dependency(
            resource_variable_ops.ResourceVariable(
                path, dtype=dtypes.string,
                name="asset_path"))
      else:
        # Adding a variable is too disruptive when v1-style graph building,
        # since things may get fed and local variable initializers would then
        # need to be run.
        self._path = path

  @property
  def asset_path(self):
    """Fetch the current asset path."""
    return self._path

ops.register_tensor_conversion_function(
    TrackableAsset,
    lambda asset, **kw: ops.internal_convert_to_tensor(asset.asset_path, **kw))
Example #34
        """Converts a variable to a tensor."""
        with ds_context.enter_or_assert_strategy(self._distribute_strategy):
            return ops.convert_to_tensor(self._get(),
                                         dtype=dtype,
                                         name=name,
                                         as_ref=as_ref)


# Register conversion functions which read the value of the variable,
# allowing instances of the class to be used as tensors.
# MirroredVariables
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
    return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access


ops.register_tensor_conversion_function(MirroredVariable,
                                        _tensor_conversion_mirrored)


# Mirrored Values
def _tensor_conversion_mirrored_val(value,
                                    dtype=None,
                                    name=None,
                                    as_ref=False):
    return ops.convert_to_tensor(value._get(),
                                 dtype=dtype,
                                 name=name,
                                 as_ref=as_ref)  # pylint: disable=protected-access


ops.register_tensor_conversion_function(Mirrored,
                                        _tensor_conversion_mirrored_val)
Example #35
tc.register_type_abbreviation(ops.Tensor, 'tensorflow.Tensor')
tc.register_type_abbreviation(dtypes.DType, 'tensorflow.DType')
# core LabeledTensor types
tc.register_type_abbreviation(Axis, 'labeled_tensor.Axis')
tc.register_type_abbreviation(Axes, 'labeled_tensor.Axes')
tc.register_type_abbreviation(LabeledTensor, 'labeled_tensor.LabeledTensor')


@tc.returns(ops.Tensor)
@tc.accepts(LabeledTensor)
def _convert_labeled_tensor_to_tensor(value, *args, **kwargs):
  # call ops.convert_to_tensor to handle optional arguments appropriately
  return ops.convert_to_tensor(value.tensor, *args, **kwargs)


ops.register_tensor_conversion_function(
    LabeledTensor, _convert_labeled_tensor_to_tensor)


# tc class for anything that can be coerced into a LabeledTensor
# pylint: disable=invalid-name
LabeledTensorLike = tc.Union(LabeledTensor, ops.Tensor, np.ndarray, Scalar)
# pylint: enable=invalid-name


@tc.returns(LabeledTensor)
@tc.accepts(LabeledTensorLike, object, tc.Optional(string_types))
def convert_to_labeled_tensor(value, dtype=None, name=None):
  """Converts the given `value` to a `LabeledTensor`.

  This function accepts `LabeledTensor` objects, 0-dimensional `Tensor` objects
  and numpy arrays, and Python scalars. Higher dimensional unlabeled tensors
Example #36
    raise NotImplementedError("surrogate_loss not implemented")

  @staticmethod
  def _tensor_conversion_function(v, dtype=None, name=None, as_ref=False):
    _ = name
    if dtype and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      raise ValueError("%s: Ref type is not supported." % v)
    return v.value()


# pylint: disable=protected-access
ops.register_tensor_conversion_function(
    StochasticTensor, StochasticTensor._tensor_conversion_function)
# pylint: enable=protected-access


class _StochasticValueType(object):
  """Interface for the ValueType classes.

  This is the base class for MeanValue, SampleValue, SampleAndReshapeValue,
  and their descendants.
  """

  def pushed_above(self, unused_value_type):
    pass

  def popped_above(self, unused_value_type):
    pass
Example #37
    def _read_variable_op(self):
        with ops.control_dependencies([self._parent_op]):
            return gen_resource_variable_ops.read_variable_op(
                self._handle, self._dtype)

    def set_shape(self, shape):
        self._shape = shape

    @property
    def op(self):
        """The op for this variable."""
        return self._parent_op


ops.register_tensor_conversion_function(_UnreadVariable, _dense_var_to_tensor)
ops.register_dense_tensor_like_type(_UnreadVariable)

# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.

# Note: registering for Variable after ResourceVariable because inheritance will
# otherwise lead to the wrong behavior.
ops.register_tensor_conversion_function(ResourceVariable, _dense_var_to_tensor)
ops.register_tensor_conversion_function(
    variables.Variable, variables.Variable._TensorConversionFunction)  # pylint: disable=protected-access

# pylint: disable=protected-access
ResourceVariable._OverloadAllOperators()
ops.register_dense_tensor_like_type(ResourceVariable)
Example #38
        return {checkpointable.VARIABLE_VALUE_KEY: _saveable_factory}


# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False):
    # Try to avoid assignments to and other mutations of MirroredVariable
    # state except through a DistributionStrategy.update() call.
    assert not as_ref
    return ops.internal_convert_to_tensor(var.get(),
                                          dtype=dtype,
                                          name=name,
                                          as_ref=as_ref)


ops.register_tensor_conversion_function(MirroredVariable,
                                        _tensor_conversion_mirrored)


class _TowerLocalSaveable(saver.BaseSaverBuilder.SaveableObject):
    """Class for defining how to restore a TowerLocalVariable."""
    def __init__(self, tower_local_variable, name):
        self._tower_local_variable = tower_local_variable

        # We use a callable so that we don't have to evaluate this expression
        # in the case where we are trying to restore instead of save.
        def tensor():
            return distribution_strategy_context.get_distribution_strategy(
            ).read_var(tower_local_variable)

        spec = saver.BaseSaverBuilder.SaveSpec(
            tensor=tensor,
Example #39
    if isinstance(tensor_or_op, ops.Tensor):
        op = tensor_or_op.op
    else:
        op = tensor_or_op
    return op.type == "Const"


def _constant_tensor_conversion_function(v,
                                         dtype=None,
                                         name=None,
                                         as_ref=False):
    _ = as_ref
    return constant(v, dtype=dtype, name=name)


ops.register_tensor_conversion_function(
    (list, tuple), _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(np.ndarray,
                                        _constant_tensor_conversion_function,
                                        100)
ops.register_tensor_conversion_function(np.generic,
                                        _constant_tensor_conversion_function,
                                        100)
ops.register_tensor_conversion_function(object,
                                        _constant_tensor_conversion_function,
                                        200)


def _tensor_shape_tensor_conversion_function(s,
                                             dtype=None,
                                             name=None,
                                             as_ref=False):
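
The priorities registered above set the lookup order: lower numbers are tried
first, so the `object` registration at 200 is the catch-all that only runs
when nothing more specific (lists/tuples and NumPy types at 100) matched. The
same mechanism, sketched with two converters on one hypothetical type:

import tensorflow as tf

class Boxed(object):  # hypothetical
    def __init__(self, v):
        self.v = v

def _specific(value, dtype=None, name=None, as_ref=False):
    del name, as_ref  # Unused.
    return tf.constant(value.v, dtype=dtype)

def _fallback(value, dtype=None, name=None, as_ref=False):
    del name, as_ref  # Unused.
    return tf.constant(-1.0, dtype=dtype)

# Lower number = tried first: _specific wins over _fallback.
tf.register_tensor_conversion_function(Boxed, _specific, priority=50)
tf.register_tensor_conversion_function(Boxed, _fallback, priority=150)
print(tf.convert_to_tensor(Boxed(3.0)))  # 3.0, produced by _specific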
Example #40
        captured_value = graph_placeholder(dtype=dtype or value.dtype,
                                           shape=value.shape,
                                           name=name)
        if captured_value.dtype == dtypes.resource:
            captured_value._handle_data = value._handle_data  # pylint: disable=protected-access
        tensor_map[ops.tensor_id(value)] = (value, captured_value)
    else:
        captured_value = captured_value[1]
    return captured_value


# TODO(apassos): it'd be really nice if we could scope this registration.
# Note that we register this at a higher priority than ops.Tensor since we want
# to handle subclass specific conversion before a superclass conversion.
ops.register_tensor_conversion_function(tensor.Tensor,
                                        _convert_to_graph_constant,
                                        priority=-1)


class _CapturingContext(object):
    """Tracks references to Tensors outside this context while it is active."""
    def __init__(self):
        # known_ops are ops which are created while this context is active
        self.known_ops = set()

        # captured_tensors are all tensors referenced to by ops in this context but
        # not produced in it
        self.captured_tensors = set()

    def AddOp(self, op):  # pylint: disable=invalid-name
        if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
Example #41
    raise NotImplementedError("surrogate_loss not implemented")

  @staticmethod
  def _tensor_conversion_function(v, dtype=None, name=None, as_ref=False):
    _ = name
    if dtype and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      raise ValueError("%s: Ref type is not supported." % v)
    return v.value()


# pylint: disable=protected-access
ops.register_tensor_conversion_function(
    StochasticTensor, StochasticTensor._tensor_conversion_function)
# pylint: enable=protected-access


class _StochasticValueType(object):
  """Interface for the ValueType classes.

  This is the base class for MeanValue, SampleValue, SampleAndReshapeValue,
  and their descendants.
  """

  def pushed_above(self, unused_value_type):
    pass

  def popped_above(self, unused_value_type):
    pass
Example #42
      delta_get_op = delta_staging_area.get()[0]
    # Return the actual updates. The colocation constraint will be reapplied.
    return self.real_var.assign_sub(delta_get_op)

  @staticmethod
  # pylint: disable=bad-staticmethod-argument,invalid-name
  def _TensorConversionFunction(self, dtype=None, name=None, as_ref=False):
    """Utility function for converting a StagedModelVariable to a Tensor."""
    del dtype, name  # unused: this function returns the cached ref or value.
    if as_ref:
      return self._ref()
    else:
      return self._value()


ops.register_tensor_conversion_function(
    StagedModelVariable, StagedModelVariable._TensorConversionFunction)  # pylint: disable=protected-access


class StagedVariableGetter(object):
  """A variable getter through staging buffers on devices.

  Instead of a caching device, this getter tracks where the variable is used.
  And on each device, it goes through a staging buffer.
  """

  def __init__(self, device_num, devices, cpu_device, variable_mgr):
    """Initializer for StagedVariableGetter.

    Args:
      device_num: the current device index.
      devices: a list of all the devices to build towers.
Example #43
  tensor_value.tensor.CopyFrom(
      tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape))
  dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
  const_tensor = g.create_op(
      "Const", [], [dtype_value.type],
      attrs={"value": tensor_value, "dtype": dtype_value}, name=name).outputs[0]
  return const_tensor


@ops.RegisterShape("Const")
def _ConstantShape(op):
  return [tensor_shape.TensorShape(
      [d.size for d in op.get_attr("value").tensor_shape.dim])]


ops.register_tensor_conversion_function((list, tuple), constant, 100)
ops.register_tensor_conversion_function(np.ndarray, constant, 100)
ops.register_tensor_conversion_function(np.generic, constant, 100)
ops.register_tensor_conversion_function(object, constant, 200)

def _tensor_shape_tensor_conversion_function(s, dtype=None, name=None):
  if not s.is_fully_defined():
    raise ValueError(
        "Cannot convert a partially known TensorShape to a Tensor: %s" % s)
  if dtype is not None:
    if dtype not in (types.int32, types.int64):
      raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
  else:
    dtype = types.int32
  if name is None:
    name = "shape_as_tensor"
Example #44
            return self._var.scatter_min(sparse_delta, use_locking, name)

    def scatter_max(self, sparse_delta, use_locking=False, name=None):
        with ops.device(self._device):
            return self._var.scatter_max(sparse_delta, use_locking, name)

    def scatter_update(self, sparse_delta, use_locking=False, name=None):
        with ops.device(self._device):
            return self._var.scatter_update(sparse_delta, use_locking, name)

    def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
        with ops.device(self._device):
            return self._var._dense_var_to_tensor(  # pylint: disable=protected-access
                dtype=dtype,
                name=name,
                as_ref=as_ref)

    def _as_graph_element(self):
        return self._var._as_graph_element()  # pylint: disable=protected-access


def _tensor_conversion_packed_var_and_device(var,
                                             dtype=None,
                                             name=None,
                                             as_ref=False):
    return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)  # pylint: disable=protected-access


ops.register_tensor_conversion_function(
    PackedVarAndDevice, _tensor_conversion_packed_var_and_device)
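
A device-pinned conversion like the one `PackedVarAndDevice` performs can be
imitated by entering the device scope before producing the tensor (sketch;
`Pinned` is a hypothetical class, not from the snippet above):

import tensorflow as tf

class Pinned(object):
    def __init__(self, value, device):
        self.value = value
        self.device = device

def _pinned_to_tensor(p, dtype=None, name=None, as_ref=False):
    del name, as_ref  # Unused.
    with tf.device(p.device):  # ops created here land on p.device
        return tf.constant(p.value, dtype=dtype)

tf.register_tensor_conversion_function(Pinned, _pinned_to_tensor)
print(tf.convert_to_tensor(Pinned(7.0, '/CPU:0')))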
Example #45
                          tensor.Tensor(1, dtype=dtype))


def _lazy_zero_tensor(zero):
    return _zeros(zero.shape, zero.dtype)


tensor.LazyZero.tensor = _lazy_zero_tensor


def _lazy_zero_to_tensor(lazy_zero, dtype=None, name=None, as_ref=False):
    del as_ref, name, dtype
    return _zeros(lazy_zero.shape, lazy_zero.dtype)


ops.register_tensor_conversion_function(tensor.LazyZero, _lazy_zero_to_tensor)


def _indexed_slices_to_tensor(value):
    """Converts an IndexedSlices object `value` to a Tensor.

  Args:
    value: An ops.IndexedSlices object.

  Returns:
    A dense Tensor representing the values in the given IndexedSlices.

  Raises:
    ValueError: If the IndexedSlices does not have the same dtype.
  """
    if value.dense_shape is None:
Example #46
      tensor.Tensor(shape, dtype=dtypes.int32), tensor.Tensor(1, dtype=dtype))


def _lazy_zero_tensor(zero):
  return _zeros(zero.shape, zero.dtype)


tensor.LazyZero.tensor = _lazy_zero_tensor


def _lazy_zero_to_tensor(lazy_zero, dtype=None, name=None, as_ref=False):
  del as_ref, name, dtype
  return _zeros(lazy_zero.shape, lazy_zero.dtype)


ops.register_tensor_conversion_function(tensor.LazyZero, _lazy_zero_to_tensor)


def _indexed_slices_to_tensor(value):
  """Converts an IndexedSlices object `value` to a Tensor.

  Args:
    value: An ops.IndexedSlices object.

  Returns:
    A dense Tensor representing the values in the given IndexedSlices.

  Raises:
    ValueError: If the IndexedSlices does not have the same dtype.
  """
  if value.dense_shape is None:
Example #47
  # The created SavedModel is hermetic, it does not depend on
  # the original file and can be moved to another path.
  tf.io.gfile.remove("file.txt")
  tf.io.gfile.rename("/tmp/saved_model", "/tmp/new_location")

  reloaded_obj = tf.saved_model.load("/tmp/new_location")
  print(reloaded_obj.func())
  ```

  Attributes:
    asset_path: A 0-D `tf.string` tensor with path to the asset.
  """
    def __init__(self, path):
        """Record the full path to the asset."""
        # The init_scope prevents functions from capturing `path` in an
        # initialization graph, since it is transient and should not end up in a
        # serialized function body.
        with ops.init_scope(), ops.device("CPU"):
            self._path = ops.convert_to_tensor(path,
                                               dtype=dtypes.string,
                                               name="asset_path")

    @property
    def asset_path(self):
        """Fetch the current asset path."""
        return self._path


ops.register_tensor_conversion_function(
    Asset, lambda asset, **kw: ops.convert_to_tensor(asset.asset_path, **kw))

Example #48
# pylint: disable=unused-argument,protected-access
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
    if dtype is not None and dtype != var.value().dtype:
        print("trying to switch the dtype to ", dtype, " from ",
              var.value().dtype)
        return NotImplemented
    return var.value()


# pylint: enable=unused-argument,protected-access

# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
ops.register_tensor_conversion_function(ResourceVariable, _dense_var_to_tensor)

# pylint: disable=protected-access
ResourceVariable._OverloadAllOperators()
ops.register_dense_tensor_like_type(ResourceVariable)


@ops.RegisterGradient("ReadVariableOp")
def _ReadGrad(_, grad):
    """Gradient for read op."""
    return grad


@ops.RegisterGradient("ResourceGather")
def _GatherGrad(op, grad):
    """Gradient for gather op."""