def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):
  r"""Applies sparse `updates` to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be an integer tensor, containing indices into `ref`.
  It must be of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is a `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to update 4 scattered elements of a rank-1 tensor
  with 8 elements. In Python, that update would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
  indices = tf.constant([[4], [3], [1], [7]])
  updates = tf.constant([9, 10, 11, 12])
  update = tf.scatter_nd_update(ref, indices, updates)
  with tf.Session() as sess:
    print(sess.run(update))
  ```

  The resulting update to `ref` would look like this:

      [1, 11, 3, 10, 9, 6, 7, 12]

  See `tf.scatter_nd` for more details about how to make updates to slices.

  Args:
    ref: A Variable.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into `ref`.
    updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to assign to `ref`.
    use_locking: An optional `bool`. Defaults to `True`.
      If `True`, the assignment will be protected by a lock;
      otherwise the behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The value of the variable after the update.
  """
  if ref.dtype._is_ref_dtype:
    return gen_state_ops.scatter_nd_update(
        ref, indices, updates, use_locking, name)
  return ref._lazy_read(gen_state_ops.resource_scatter_nd_update(  # pylint: disable=protected-access
      ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
      name=name))
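# A minimal companion sketch (an illustration, not part of the library;
# assumes TF 1.x graph mode). It shows the `K < P` case from the docstring:
# each row of `indices` addresses a whole slice of `ref` rather than a single
# element, so `updates` carries replacement slices.
import tensorflow as tf

ref = tf.Variable([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])
indices = tf.constant([[0], [2]])  # K = 1 < P = 2: each index selects a row.
updates = tf.constant([[9, 9, 9], [7, 7, 7]])  # rank Q-1+P-K = 2, shape [2, 3]
update = tf.scatter_nd_update(ref, indices, updates)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(update))  # [[9 9 9] [2 2 2] [7 7 7] [4 4 4]]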
def testScatterNdUpdateAddOps(self):
  with self.session() as sess, self.test_scope():
    handle = resource_variable_ops.var_handle_op(
        dtype=dtypes.float32, shape=[8])
    sess.run(
        resource_variable_ops.assign_variable_op(
            handle, constant_op.constant([1] * 8, dtype=dtypes.float32)))
    indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
    updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
    expected = np.array([1, 11, 1, 10, 9, 1, 1, 12])
    sess.run(
        gen_state_ops.resource_scatter_nd_update(handle, indices, updates))
    read = resource_variable_ops.read_variable_op(
        handle, dtype=dtypes.float32)
    self.assertAllClose(expected, self.evaluate(read))
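# A plain-NumPy cross-check of the `expected` array in the test above (an
# assumption: basic NumPy fancy-indexing assignment mirrors the op's
# semantics when the indices are unique, which they are here).
import numpy as np

ref = np.ones(8, dtype=np.float32)
indices = np.array([[4], [3], [1], [7]])
updates = np.array([9, 10, 11, 12], dtype=np.float32)
ref[indices[:, 0]] = updates  # scatter the updates into the flat buffer
print(ref)  # [ 1. 11.  1. 10.  9.  1.  1. 12.]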