def testUpdateAddSubGradients(self):
  """Checks theoretical vs. numerical gradients of scatter update/add/sub.

  Gradients are verified with respect to both the base tensor and the
  updates tensor, for each of the three tensor_scatter_* operations.
  """
  with self.cached_session():
    scatter_indices = constant_op.constant([[3], [1]])
    updates = constant_op.constant([9, 10], dtype=dtypes.float32)
    x = array_ops.ones([4], dtype=dtypes.float32)

    scatter_ops = (
        array_ops.tensor_scatter_update,
        array_ops.tensor_scatter_add,
        array_ops.tensor_scatter_sub,
    )

    # Gradient w.r.t. the base tensor x.
    for scatter_fn in scatter_ops:
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          lambda t, fn=scatter_fn: fn(t, scatter_indices, updates), [x])
      self.assertAllClose(theoretical, numerical, 5e-4, 5e-4)

    # Gradient w.r.t. the updates tensor.
    for scatter_fn in scatter_ops:
      theoretical, numerical = gradient_checker_v2.compute_gradient(
          lambda u, fn=scatter_fn: fn(x, scatter_indices, u), [updates])
      self.assertAllClose(theoretical, numerical, 5e-4, 5e-4)
def testUpdateAddSubGradients(self):
  """Verifies gradient-check error bounds for scatter update/add/sub.

  Uses the graph-mode gradient checker to bound the error of each
  scatter result with respect to both the base tensor and the updates.
  """
  with self.cached_session():
    indices = constant_op.constant([[3], [1]])
    updates = constant_op.constant([9, 10], dtype=dtypes.float32)
    x = array_ops.ones([4], dtype=dtypes.float32)

    outputs = [
        array_ops.tensor_scatter_update(x, indices, updates),
        array_ops.tensor_scatter_add(x, indices, updates),
        array_ops.tensor_scatter_sub(x, indices, updates),
    ]

    # Error w.r.t. the base tensor x (shape [4]).
    for out in outputs:
      err = gradient_checker.compute_gradient_error(x, [4], out, [4])
      self.assertLess(err, 2e-4)

    # Error w.r.t. the updates tensor (shape [2]).
    for out in outputs:
      err = gradient_checker.compute_gradient_error(updates, [2], out, [4])
      self.assertLess(err, 2e-4)
def testUpdateAddSubGradients(self):
  """Bounds gradient-checker error for the three scatter variants.

  Each of tensor_scatter_update/add/sub is differentiated with respect
  to the base tensor and to the updates; the numerical-vs-theoretical
  gradient error must stay below 2e-4 in every case.
  """
  with self.cached_session():
    indices = constant_op.constant([[3], [1]])
    updates = constant_op.constant([9, 10], dtype=dtypes.float32)
    base = array_ops.ones([4], dtype=dtypes.float32)

    results = (
        array_ops.tensor_scatter_update(base, indices, updates),
        array_ops.tensor_scatter_add(base, indices, updates),
        array_ops.tensor_scatter_sub(base, indices, updates),
    )

    # Check sensitivity to the base tensor, then to the updates.
    for wrt, wrt_shape in ((base, [4]), (updates, [2])):
      for result in results:
        error = gradient_checker.compute_gradient_error(
            wrt, wrt_shape, result, [4])
        self.assertLess(error, 2e-4)
def _body(i, dist, quota_used):
  """One loop-body step: conditionally bump one segment column of `dist`.

  Picks segment `i mod num_segments`; for each batch row, adds 1 to that
  column of `dist` when the column is still below its row-length quota,
  and accumulates the increments into `quota_used`.
  """
  segment = math_ops.mod(i, num_segments)
  column = dist[..., segment]
  # 1.0 where this segment still has quota left, else 0.0.
  increments = array_ops.where(column < row_lengths[..., segment],
                               array_ops.ones_like(column),
                               array_ops.zeros_like(column))
  # Build [batch_size, 2] indices of the form (batch_row, segment).
  segment_col = array_ops.expand_dims(
      array_ops.tile([segment], [batch_size]), -1)
  batch_col = array_ops.reshape(
      math_ops.range(batch_size), [batch_size, 1])
  scatter_index_2d = array_ops.concat([batch_col, segment_col], -1)
  updated_dist = array_ops.tensor_scatter_add(dist, scatter_index_2d,
                                              increments)
  return i + 1, updated_dist, quota_used + increments
def testUpdateAddSub(self):
  """Checks exact outputs of tensor_scatter_update/add/sub on a ones vector."""
  indices = constant_op.constant([[4], [3], [1], [7]])
  updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
  base = array_ops.ones([8], dtype=dtypes.float32)

  # Scattering into positions 4, 3, 1, 7 of an all-ones tensor:
  # update replaces, add sums with 1, sub subtracts from 1.
  expected = {
      array_ops.tensor_scatter_update: [1, 11, 1, 10, 9, 1, 1, 12],
      array_ops.tensor_scatter_add: [1, 12, 1, 11, 10, 1, 1, 13],
      array_ops.tensor_scatter_sub: [1, -10, 1, -9, -8, 1, 1, -11],
  }
  for scatter_fn, want in expected.items():
    self.assertAllEqual(
        scatter_fn(base, indices, updates), constant_op.constant(want))
def testUpdateAddSub(self):
  """Verifies scatter update/add/sub results element-by-element."""
  idx = constant_op.constant([[4], [3], [1], [7]])
  vals = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
  ones_vec = array_ops.ones([8], dtype=dtypes.float32)

  replaced = array_ops.tensor_scatter_update(ones_vec, idx, vals)
  summed = array_ops.tensor_scatter_add(ones_vec, idx, vals)
  negated = array_ops.tensor_scatter_sub(ones_vec, idx, vals)

  # Positions 1, 3, 4, 7 receive 11, 10, 9, 12 respectively; the rest
  # keep the base value of 1 (update), 1+v (add), or 1-v (sub).
  self.assertAllEqual(replaced,
                      constant_op.constant([1, 11, 1, 10, 9, 1, 1, 12]))
  self.assertAllEqual(summed,
                      constant_op.constant([1, 12, 1, 11, 10, 1, 1, 13]))
  self.assertAllEqual(negated,
                      constant_op.constant([1, -10, 1, -9, -8, 1, 1, -11]))