Example #1
    def testUpdateAddSubGradients(self):
        with self.cached_session():
            indices = constant_op.constant([[3], [1]])
            updates = constant_op.constant([9, 10], dtype=dtypes.float32)
            x = array_ops.ones([4], dtype=dtypes.float32)

            theoretical, numerical = gradient_checker_v2.compute_gradient(
                lambda x: array_ops.tensor_scatter_update(x, indices, updates),
                [x])
            self.assertAllClose(theoretical, numerical, 5e-4, 5e-4)
            theoretical, numerical = gradient_checker_v2.compute_gradient(
                lambda x: array_ops.tensor_scatter_add(x, indices, updates),
                [x])
            self.assertAllClose(theoretical, numerical, 5e-4, 5e-4)
            theoretical, numerical = gradient_checker_v2.compute_gradient(
                lambda x: array_ops.tensor_scatter_sub(x, indices, updates),
                [x])
            self.assertAllClose(theoretical, numerical, 5e-4, 5e-4)

            theoretical, numerical = gradient_checker_v2.compute_gradient(
                lambda updates: array_ops.tensor_scatter_update(
                    x, indices, updates), [updates])
            self.assertAllClose(theoretical, numerical, 5e-4, 5e-4)
            theoretical, numerical = gradient_checker_v2.compute_gradient(
                lambda updates: array_ops.tensor_scatter_add(
                    x, indices, updates), [updates])
            self.assertAllClose(theoretical, numerical, 5e-4, 5e-4)
            theoretical, numerical = gradient_checker_v2.compute_gradient(
                lambda updates: array_ops.tensor_scatter_sub(
                    x, indices, updates), [updates])
            self.assertAllClose(theoretical, numerical, 5e-4, 5e-4)
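The checker compares the theoretical Jacobian (from the registered gradient) against a numerical finite-difference estimate. The structure it verifies can also be seen directly with tf.GradientTape; a minimal sketch using the public API:

import tensorflow as tf

x = tf.ones([4])
indices = tf.constant([[3], [1]])
updates = tf.constant([9.0, 10.0])

with tf.GradientTape(persistent=True) as tape:
  tape.watch([x, updates])
  loss = tf.reduce_sum(tf.tensor_scatter_nd_update(x, indices, updates))

print(tape.gradient(loss, x))        # [1., 0., 1., 0.]: overwritten slots get zero
print(tape.gradient(loss, updates))  # [1., 1.]: upstream grad gathered at indices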
Example #2
  def testDeterminism(self):
    a = array_ops.zeros([1])
    indices = array_ops.zeros([100000, 1], dtypes.int32)
    values = np.random.randn(100000)
    val = self.evaluate(array_ops.tensor_scatter_update(a, indices, values))
    for _ in range(5):
      val2 = self.evaluate(array_ops.tensor_scatter_update(a, indices, values))
      self.assertAllEqual(val, val2)
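The test pins down run-to-run reproducibility by evaluating the same scatter repeatedly and comparing results. A hedged sketch: newer TensorFlow releases (2.8+) can request deterministic kernels globally via tf.config.experimental.enable_op_determinism, in which case ops without a deterministic implementation raise an error instead of silently varying:

import tensorflow as tf

tf.random.set_seed(0)                            # determinism requires seeded random ops
tf.config.experimental.enable_op_determinism()   # assumption: TF 2.8+ API
a = tf.zeros([1])
indices = tf.zeros([100000, 1], tf.int32)
values = tf.random.normal([100000])
first = tf.tensor_scatter_nd_update(a, indices, values)
second = tf.tensor_scatter_nd_update(a, indices, values)
# With determinism enabled, `first` and `second` must match exactly.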
Example #3
    def testUpdateAddSubGradients(self):

        with self.cached_session():
            indices = constant_op.constant([[3], [1]])
            updates = constant_op.constant([9, 10], dtype=dtypes.float32)
            x = array_ops.ones([4], dtype=dtypes.float32)

            assigned = array_ops.tensor_scatter_update(x, indices, updates)
            added = array_ops.tensor_scatter_add(x, indices, updates)
            subbed = array_ops.tensor_scatter_sub(x, indices, updates)

            err_assigned = gradient_checker.compute_gradient_error(
                x, [4], assigned, [4])
            err_added = gradient_checker.compute_gradient_error(
                x, [4], added, [4])
            err_subbed = gradient_checker.compute_gradient_error(
                x, [4], subbed, [4])

            self.assertLess(err_assigned, 2e-4)
            self.assertLess(err_added, 2e-4)
            self.assertLess(err_subbed, 2e-4)

            err_assigned_wrt_updates = gradient_checker.compute_gradient_error(
                updates, [2], assigned, [4])
            err_added_wrt_updates = gradient_checker.compute_gradient_error(
                updates, [2], added, [4])
            err_subbed_wrt_updates = gradient_checker.compute_gradient_error(
                updates, [2], subbed, [4])

            self.assertLess(err_assigned_wrt_updates, 2e-4)
            self.assertLess(err_added_wrt_updates, 2e-4)
            self.assertLess(err_subbed_wrt_updates, 2e-4)
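This is the graph-mode (v1) variant of the gradient check in Example #1: gradient_checker.compute_gradient_error builds the ops up front and returns the maximum elementwise difference between the theoretical and numerical Jacobians, so the test only asserts an upper bound on that error.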
Example #4
        def _TestFn():
            indices = constant_op.constant([[4], [3], [1], [7]])
            updates = constant_op.constant([9, 10, 11, 12],
                                           dtype=dtypes.float32)
            t = array_ops.ones([8], dtype=dtypes.float32)

            return array_ops.tensor_scatter_update(t, indices, updates)
Example #5
def _TensorScatterUpdateGrad(op, grad):
  indices = op.inputs[1]
  updates_grad = array_ops.gather_nd(grad, indices)
  tensor_grad = array_ops.tensor_scatter_update(
      array_ops.identity(grad), indices,
      array_ops.zeros_like(op.inputs[2], dtype=grad.dtype))
  return [tensor_grad, None, updates_grad]
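The registered gradient has three parts: the gradient with respect to the input tensor is the upstream gradient with zeros scattered over the overwritten slots (those inputs no longer reach the output); indices are not differentiable, so they get None; and the gradient with respect to updates is the upstream gradient gathered at `indices`. A small numeric sketch of the rule with the public API:

import tensorflow as tf

grad = tf.constant([1.0, 2.0, 3.0, 4.0])   # upstream gradient dL/dy
indices = tf.constant([[3], [1]])
tensor_grad = tf.tensor_scatter_nd_update(grad, indices, tf.zeros([2]))
# -> [1., 0., 3., 0.]  (zeros where the forward op overwrote the input)
updates_grad = tf.gather_nd(grad, indices)
# -> [4., 2.]          (upstream gradient routed to the updates)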
Example #6
  def testUpdateAddSubGradients(self):

    with self.cached_session():
      indices = constant_op.constant([[3], [1]])
      updates = constant_op.constant([9, 10], dtype=dtypes.float32)
      x = array_ops.ones([4], dtype=dtypes.float32)

      assigned = array_ops.tensor_scatter_update(x, indices, updates)
      added = array_ops.tensor_scatter_add(x, indices, updates)
      subbed = array_ops.tensor_scatter_sub(x, indices, updates)

      err_assigned = gradient_checker.compute_gradient_error(
          x, [4], assigned, [4])
      err_added = gradient_checker.compute_gradient_error(x, [4], added, [4])
      err_subbed = gradient_checker.compute_gradient_error(x, [4], subbed, [4])

      self.assertLess(err_assigned, 2e-4)
      self.assertLess(err_added, 2e-4)
      self.assertLess(err_subbed, 2e-4)

      err_assigned_wrt_updates = gradient_checker.compute_gradient_error(
          updates, [2], assigned, [4])
      err_added_wrt_updates = gradient_checker.compute_gradient_error(
          updates, [2], added, [4])
      err_subbed_wrt_updates = gradient_checker.compute_gradient_error(
          updates, [2], subbed, [4])

      self.assertLess(err_assigned_wrt_updates, 2e-4)
      self.assertLess(err_added_wrt_updates, 2e-4)
      self.assertLess(err_subbed_wrt_updates, 2e-4)
Example #7
def _TensorScatterUpdateGrad(op, grad):
    indices = op.inputs[1]
    updates_grad = array_ops.gather_nd(grad, indices)
    tensor_grad = array_ops.tensor_scatter_update(
        array_ops.identity(grad), indices,
        array_ops.zeros_like(op.inputs[2], dtype=grad.dtype))
    return [tensor_grad, None, updates_grad]
Example #8
  def testUpdateRepeatedIndices1D(self):
    if test_util.is_gpu_available():
      self.skipTest("Duplicate indices scatter is non-deterministic on GPU")
    a = array_ops.zeros([10, 1])
    b = array_ops.tensor_scatter_update(a, [[5], [5]], [[4], [8]])
    self.assertAllEqual(
        b,
        constant_op.constant([[0.], [0.], [0.], [0.], [0.], [8.], [0.], [0.],
                              [0.], [0.]]))
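With duplicate indices the op applies updates in an unspecified order; the assertion above relies on the last update winning, which holds on CPU but not on GPU, hence the skip. The same case through the public API (a sketch):

import tensorflow as tf

b = tf.tensor_scatter_nd_update(tf.zeros([10, 1]), [[5], [5]], [[4.0], [8.0]])
# On CPU, b[5] == [8.0] (the last duplicate wins); on GPU the order is not guaranteed.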
Example #9
def moveaxis(a, source, destination):  # pylint: disable=missing-docstring
    """Raises ValueError if source, destination not in (-ndim(a), ndim(a))."""
    if not source and not destination:
        return a

    a = asarray(a).data

    if isinstance(source, int):
        source = (source, )
    if isinstance(destination, int):
        destination = (destination, )

    a_rank = np_utils._maybe_static(array_ops.rank(a))  # pylint: disable=protected-access

    def _correct_axis(axis, rank):
        if axis < 0:
            return axis + rank
        return axis

    source = tuple(_correct_axis(axis, a_rank) for axis in source)
    destination = tuple(_correct_axis(axis, a_rank) for axis in destination)

    if a.shape.rank is not None:
        perm = [i for i in range(a_rank) if i not in source]
        for dest, src in sorted(zip(destination, source)):
            assert dest <= len(perm)
            perm.insert(dest, src)
    else:
        r = math_ops.range(a_rank)

        def _remove_indices(a, b):
            """Remove indices (`b`) from `a`."""
            items = array_ops.unstack(sort_ops.sort(array_ops.stack(b)),
                                      num=len(b))

            i = 0
            result = []

            for item in items:
                result.append(a[i:item])
                i = item + 1

            result.append(a[i:])

            return array_ops.concat(result, 0)

        minus_sources = _remove_indices(r, source)
        minus_dest = _remove_indices(r, destination)

        perm = array_ops.scatter_nd(array_ops.expand_dims(minus_dest, 1),
                                    minus_sources, [a_rank])
        perm = array_ops.tensor_scatter_update(
            perm, array_ops.expand_dims(destination, 1), source)
    a = array_ops.transpose(a, perm)

    return np_utils.tensor_to_ndarray(a)
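This is the tf.experimental.numpy implementation of moveaxis; in the dynamic-rank branch the permutation is assembled with scatter_nd and then patched with tensor_scatter_update. A usage sketch through the public wrapper (assumes a TF build with the numpy API, 2.4+):

import tensorflow.experimental.numpy as tnp

x = tnp.zeros((2, 3, 4))
y = tnp.moveaxis(x, 0, -1)  # move the first axis to the end
print(y.shape)              # (3, 4, 2)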
Example #10
def _descending_sort(values, axis, return_argsort=False):
  """Sorts values in reverse using `top_k`.

  Args:
    values: Tensor of numeric values.
    axis: Index of the axis which values should be sorted along.
    return_argsort: If False, return the sorted values. If True, return the
      indices that would sort the values.

  Returns:
    The sorted values.
  """
  # TODO(b/190410105): replace with a proper sort kernel.
  k = array_ops.shape(values)[axis]
  rank = array_ops.rank(values)
  static_rank = values.shape.ndims
  # Fast path: sorting the last axis.
  if axis == -1 or axis + 1 == values.get_shape().ndims:
    top_k_input = values
    transposition = None
  else:
    # Otherwise, transpose the array. Swap axes `axis` and `rank - 1`.
    if axis < 0:
      # Calculate the actual axis index if counting from the end. Use the static
      # rank if available, or else make the axis back into a tensor.
      axis += static_rank or rank
    if static_rank is not None:
      # Prefer to calculate the transposition array in NumPy and make it a
      # constant.
      transposition = constant_op.constant(
          np.r_[
              # Axes up to axis are unchanged.
              np.arange(axis),
              # Swap axis and rank - 1.
              [static_rank - 1],
              # Axes in [axis + 1, rank - 1) are unchanged.
              np.arange(axis + 1, static_rank - 1),
              # Swap axis and rank - 1.
              [axis]],
          name='transposition')
    else:
      # Generate the transposition array from the tensors.
      transposition = array_ops.tensor_scatter_update(
          math_ops.range(rank), [[axis], [rank-1]], [rank-1, axis])
    top_k_input = array_ops.transpose(values, transposition)

  values, indices = nn_ops.top_k(top_k_input, k)
  return_value = indices if return_argsort else values
  if transposition is not None:
    # transposition contains a single cycle of length 2 (swapping 2 elements),
    # so it is an involution (it is its own inverse).
    return_value = array_ops.transpose(return_value, transposition)
  return return_value
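This helper backs the public tf.sort and tf.argsort with direction='DESCENDING'; when the sort axis is not the last one and the rank is only known at runtime, the axis-swapping permutation is itself built with tensor_scatter_update (the else branch above). A usage sketch of the public entry points:

import tensorflow as tf

values = tf.constant([[3.0, 1.0, 2.0],
                      [6.0, 5.0, 4.0]])
print(tf.sort(values, axis=0, direction='DESCENDING'))     # each column in reverse order
print(tf.argsort(values, axis=0, direction='DESCENDING'))  # indices that would sort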
Example #11
  def testUpdateRepeatedIndices2D(self):
    if test_util.is_gpu_available():
      self.skipTest("Duplicate indices scatter is non-deterministic on GPU")
    a = array_ops.zeros([10, 10])
    b = array_ops.tensor_scatter_update(
        a, [[5], [6], [6]],
        [math_ops.range(10),
         math_ops.range(11, 21),
         math_ops.range(10, 20)])
    self.assertAllEqual(
        b[6],
        constant_op.constant(
            [10., 11., 12., 13., 14., 15., 16., 17., 18., 19.]))
Example #12
  def testUpdateAddSub(self):
    indices = constant_op.constant([[4], [3], [1], [7]])
    updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
    t = array_ops.ones([8], dtype=dtypes.float32)
    assigned = array_ops.tensor_scatter_update(t, indices, updates)
    added = array_ops.tensor_scatter_add(t, indices, updates)
    subbed = array_ops.tensor_scatter_sub(t, indices, updates)

    self.assertAllEqual(assigned,
                        constant_op.constant([1, 11, 1, 10, 9, 1, 1, 12]))
    self.assertAllEqual(added,
                        constant_op.constant([1, 12, 1, 11, 10, 1, 1, 13]))
    self.assertAllEqual(subbed,
                        constant_op.constant([1, -10, 1, -9, -8, 1, 1, -11]))
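The internal array_ops names above back the public tf.tensor_scatter_nd_update / tf.tensor_scatter_nd_add / tf.tensor_scatter_nd_sub. The same arithmetic as a standalone sketch:

import tensorflow as tf

t = tf.ones([8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9.0, 10.0, 11.0, 12.0])
tf.tensor_scatter_nd_update(t, indices, updates)  # [1, 11, 1, 10, 9, 1, 1, 12]
tf.tensor_scatter_nd_add(t, indices, updates)     # [1, 12, 1, 11, 10, 1, 1, 13]
tf.tensor_scatter_nd_sub(t, indices, updates)     # [1, -10, 1, -9, -8, 1, 1, -11]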
Example #13
    def testUpdateMinMax(self):
        indices = constant_op.constant([[4], [3], [1], [7]])
        updates = constant_op.constant([0, 2, -1, 1.2], dtype=dtypes.float32)
        t = array_ops.ones([8], dtype=dtypes.float32)
        assigned = array_ops.tensor_scatter_update(t, indices, updates)
        min_result = array_ops.tensor_scatter_min(t, indices, updates)
        max_result = array_ops.tensor_scatter_max(t, indices, updates)

        self.assertAllEqual(assigned,
                            constant_op.constant([1, -1, 1, 2, 0, 1, 1, 1.2]))
        self.assertAllEqual(min_result,
                            constant_op.constant([1, -1, 1, 1, 0, 1, 1, 1]))
        self.assertAllEqual(max_result,
                            constant_op.constant([1, 1, 1, 2, 1, 1, 1, 1.2]))
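tensor_scatter_min and tensor_scatter_max keep the element-wise minimum/maximum of the existing value and the update. Publicly they are exposed as tf.tensor_scatter_nd_min / tf.tensor_scatter_nd_max in newer releases; a sketch:

import tensorflow as tf

t = tf.ones([8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([0.0, 2.0, -1.0, 1.2])
tf.tensor_scatter_nd_min(t, indices, updates)  # [1, -1, 1, 1, 0, 1, 1, 1]
tf.tensor_scatter_nd_max(t, indices, updates)  # [1, 1, 1, 2, 1, 1, 1, 1.2]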
Example #14
    def testUpdateAddSub(self):
        indices = constant_op.constant([[4], [3], [1], [7]])
        updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
        t = array_ops.ones([8], dtype=dtypes.float32)
        assigned = array_ops.tensor_scatter_update(t, indices, updates)
        added = array_ops.tensor_scatter_add(t, indices, updates)
        subbed = array_ops.tensor_scatter_sub(t, indices, updates)

        self.assertAllEqual(assigned,
                            constant_op.constant([1, 11, 1, 10, 9, 1, 1, 12]))
        self.assertAllEqual(added,
                            constant_op.constant([1, 12, 1, 11, 10, 1, 1, 13]))
        self.assertAllEqual(
            subbed, constant_op.constant([1, -10, 1, -9, -8, 1, 1, -11]))
Example #15
def swapaxes(a, axis1, axis2):  # pylint: disable=missing-docstring
    a = asarray(a)

    a_rank = array_ops.rank(a)
    if axis1 < 0:
        axis1 += a_rank
    if axis2 < 0:
        axis2 += a_rank

    perm = math_ops.range(a_rank)
    perm = array_ops.tensor_scatter_update(perm, [[axis1], [axis2]],
                                           [axis2, axis1])
    a = array_ops.transpose(a, perm)

    return np_utils.tensor_to_ndarray(a)
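The permutation trick in swapaxes: start from the identity permutation range(rank) and scatter each swapped axis index into the other's slot. The same idea with public ops (a sketch):

import tensorflow as tf

a = tf.zeros([2, 3, 4])
axis1, axis2 = 0, 2
perm = tf.range(tf.rank(a))
perm = tf.tensor_scatter_nd_update(perm, [[axis1], [axis2]], [axis2, axis1])
print(tf.transpose(a, perm).shape)  # (4, 3, 2)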
Example #16
  def testTensorScatterUpdateWithStrings(self):
    indices = constant_op.constant([[4], [3], [1], [7]])
    updates = constant_op.constant(["there", "there", "there", "12"],
                                   dtype=dtypes.string)
    tensor = constant_op.constant([
        "hello", "hello", "hello", "hello", "hello", "hello", "hello", "hello"
    ],
                                  dtype=dtypes.string)
    updated = array_ops.tensor_scatter_update(tensor, indices, updates)

    self.assertAllEqual(
        updated,
        constant_op.constant([
            "hello", "there", "hello", "there", "there", "hello", "hello", "12"
        ]))
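As the test shows, the op is dtype-generic, so it works for tf.string as well as numeric dtypes. A compact public-API sketch:

import tensorflow as tf

tensor = tf.fill([8], "hello")
updated = tf.tensor_scatter_nd_update(
    tensor, [[4], [3], [1], [7]], ["there", "there", "there", "12"])
# -> [b'hello', b'there', b'hello', b'there', b'there', b'hello', b'hello', b'12']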
Example #17
def _ragged_tensor_scatter_nd_update(params, indices, updates):
    """Version of tensor_scatter_nd_update() where the values are ragged."""
    # Create a RT in the shape of `params` and containing the "global" positions.
    # Here "global" means the element position in the flat values Tensor.
    global_positions_flat = math_ops.range(array_ops.size(params.flat_values))
    global_positions = params.with_flat_values(global_positions_flat)

    global_indices = array_ops.batch_gather(global_positions, indices)
    update_indices = global_indices.flat_values
    update_indices = array_ops.expand_dims(update_indices, -1)
    update_indices = math_ops.cast(update_indices, params.dtype)
    params_flat = params.flat_values
    update_values = math_ops.cast(updates.flat_values, params_flat.dtype)
    results_flat = array_ops.tensor_scatter_update(params_flat, update_indices,
                                                   update_values)
    return params.with_flat_values(results_flat)
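The trick here is to work on the RaggedTensor's flat_values: number every flat element (the "global" positions), translate the ragged per-row indices into those global positions via batch_gather, and then perform an ordinary dense scatter before reassembling the ragged shape with with_flat_values. A minimal sketch of the flat-values round trip with public APIs (global positions written out by hand):

import tensorflow as tf

rt = tf.ragged.constant([[10, 20, 30], [40, 50]])
# rt.flat_values == [10, 20, 30, 40, 50]; flat position 1 is row 0 / element 1,
# flat position 3 is row 1 / element 0.
flat = tf.tensor_scatter_nd_update(rt.flat_values, [[1], [3]], [-1, -2])
result = rt.with_flat_values(flat)  # [[10, -1, 30], [-2, 50]]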
Example #18
    def get_selectable(self, input_ids, axis):
        """See `get_selectable()` in superclass."""
        selectable = super(FirstNItemSelector,
                           self).get_selectable(input_ids, axis)
        axis = array_ops.get_positive_axis(
            axis, input_ids.ragged_rank + input_ids.flat_values.shape.rank)
        # Create a positions RT and mask out positions that are not selectable
        positions_flat = math_ops.range(array_ops.size(input_ids.flat_values))
        positions = input_ids.with_flat_values(positions_flat)
        selectable_positions = ragged_array_ops.boolean_mask(
            positions, selectable)

        # merge to the desired axis
        selectable_positions = selectable_positions.merge_dims(
            1, axis) if axis > 1 else selectable_positions

        # Get a selection mask based off of how many items are desired for selection
        merged_axis = axis - (axis - 1)
        selection_mask = _get_selection_mask(selectable_positions,
                                             self._num_to_select, merged_axis)
        # Mask out positions that were not selected.
        selected_positions = ragged_array_ops.boolean_mask(
            selectable_positions, selection_mask)

        # Now that we have all the positions which were chosen, we recreate a mask
        # (matching the original input's shape) where the value is True if it was
        # selected. We do this by creating a "all false" RT and scattering true
        # values to the positions chosen for selection.
        all_true = selected_positions.with_flat_values(
            array_ops.ones_like(selected_positions.flat_values))
        all_false = math_ops.cast(
            array_ops.zeros(array_ops.shape(input_ids.flat_values)),
            dtypes.int32)
        results_flat = array_ops.tensor_scatter_update(
            all_false, array_ops.expand_dims(selected_positions.flat_values,
                                             -1), all_true.flat_values)
        results = input_ids.with_flat_values(results_flat)
        results = math_ops.cast(results, dtypes.bool)

        # Reduce until input.shape[:axis]
        for _ in range(input_ids.shape.ndims - axis - 1):
            results = math_ops.reduce_all(results, -1)
        return results
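The key scatter step above builds a boolean selection mask by scattering ones into an all-zeros tensor at the chosen flat positions. In isolation (a sketch with made-up positions):

import tensorflow as tf

num_items = 8
selected_positions = tf.constant([0, 2, 5])
mask = tf.tensor_scatter_nd_update(
    tf.zeros([num_items], tf.int32),
    tf.expand_dims(selected_positions, -1),
    tf.ones_like(selected_positions))
mask = tf.cast(mask, tf.bool)
# -> [True, False, True, False, False, True, False, False]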
Example #19
    def _TestFn():
      indices = constant_op.constant([[4], [3], [1], [7]])
      updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
      t = array_ops.ones([8], dtype=dtypes.float32)

      return array_ops.tensor_scatter_update(t, indices, updates)
Example #20
            def _TestFn():
                indices = constant_op.constant([[4], [3], [1], [7]])
                updates = constant_op.constant([9, 10, 11, 12], dtype=dtype)  # pylint: disable=cell-var-from-loop
                t = array_ops.ones([8], dtype=dtype)  # pylint: disable=cell-var-from-loop

                return array_ops.tensor_scatter_update(t, indices, updates)