Example 1
    def test_skipped_ops(self):
        with context.eager_mode():
            x = constant_op.constant(np.ones((1, 1, 1, 1)).astype(np.float32))

            # Cast is on the hardcoded list of ops to skip
            gen_math_ops.cast(x, dtypes.float64)
            self.assertEmpty(self._get_new_node_defs())

            gen_nn_ops.conv2d(x, x, [1, 1, 1, 1], 'SAME')
            y = constant_op.constant(np.zeros((1, 1, 1, 1)).astype(np.float32))
            # Duplicate ops are skipped, even if input values are different
            gen_nn_ops.conv2d(x, y, [1, 1, 1, 1], 'SAME')
            if not IsMklEnabled():
                self.assertLen(self._get_new_node_defs(), 1)
            else:
                ndefs = self._get_new_node_defs()
                if (len(ndefs) > 1 and ndefs[0].op != ndefs[1].op):
                    # One of the ops got rewritten by oneDNN optimization pass
                    self.assertLen(ndefs, 2)
                else:
                    self.assertLen(ndefs, 1)

            x = constant_op.constant(
                np.ones((1, 1, 1, 1, 1, 1)).astype(np.float32))
            paddings = constant_op.constant(np.ones((6, 2)).astype(np.int32))
            constant_values = constant_op.constant(0.)
            # If a host int32 input has more than 10 elements, the op is skipped
            gen_array_ops.pad_v2(x, paddings, constant_values)
            self.assertEmpty(self._get_new_node_defs())
Example 2
def sequence_mask_mid(lengths, maxlen=None, dtype=dtypes.bool, name=None):
    """Return a mask tensor representing the first N positions of each row.

    Example:

    ```python
    tf.sequence_mask([1, 3, 2], 5) =
      [[True, False, False, False, False],
       [False, False, False, False, False],
       [False, True, False, False, False]]
    ```

    Args:
      lengths: 1D integer tensor, all its values < maxlen.
      maxlen: scalar integer tensor, maximum length of each row. Default: use
              maximum over lengths.
      dtype: output type of the resulting tensor.
      name: name of the op.
    Returns:
      A 2D mask tensor, as shown in the example above, cast to specified dtype.

    Raises:
      ValueError: if the arguments have invalid rank.
    """
    with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
        lengths = np.array(lengths) / 2
        lengths = ops.convert_to_tensor(lengths, dtype=tf.int32)
        # lengths = ops.convert_to_tensor(lengths)
        if lengths.get_shape().ndims != 1:
            raise ValueError("lengths must be 1D for sequence_mask")

        if maxlen is None:
            maxlen = gen_math_ops._max(lengths, [0])
        else:
            maxlen = ops.convert_to_tensor(maxlen)
        if maxlen.get_shape().ndims != 0:
            raise ValueError("maxlen must be scalar for sequence_mask")

        # The basic idea is to compare a range row vector of size maxlen:
        # [0, 1, 2, 3, 4]
        # to length as a matrix with 1 column: [[1], [3], [2]].
        # Because of broadcasting on both arguments this comparison results
        # in a matrix of size (len(lengths), maxlen)
        row_vector = gen_math_ops._range(constant(0, maxlen.dtype), maxlen,
                                         constant(1, maxlen.dtype))
        # Since maxlen >= max(lengths), it is safe to use maxlen as a cast
        # authoritative type. Whenever maxlen fits into tf.int32, so do the lengths.
        matrix_0 = gen_math_ops.cast(expand_dims(lengths, 1), maxlen.dtype)
        matrix_1 = gen_math_ops.cast(expand_dims(lengths - 1, 1), maxlen.dtype)
        result_0 = (row_vector < matrix_0)
        result_1 = (row_vector >= matrix_1)
        result = tf.logical_and(result_0, result_1)

        if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
            return result
        else:
            return gen_math_ops.cast(result, dtype)
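A minimal NumPy sketch (not part of the example above) of the two broadcast comparisons that `result_0`, `result_1`, and the `logical_and` combine, using toy integer lengths and ignoring the `/ 2` preprocessing:

```python
import numpy as np

lengths = np.array([1, 3, 2])          # toy, already-integer lengths
maxlen = 5
row_vector = np.arange(maxlen)         # [0, 1, 2, 3, 4]
matrix_0 = lengths[:, None]            # [[1], [3], [2]]
matrix_1 = (lengths - 1)[:, None]      # [[0], [2], [1]]
band_mask = (row_vector < matrix_0) & (row_vector >= matrix_1)
# Only position length-1 in each row is True:
# [[ True, False, False, False, False],
#  [False, False,  True, False, False],
#  [False,  True, False, False, False]]
```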
Example 3
        def dropped_inputs():
            rate = self.rate
            noise_shape = self.noise_shape
            seed = self.seed
            with ops.name_scope(None, "coordinated_dropout", [inputs]) as name:
                is_rate_number = isinstance(rate, numbers.Real)
                if is_rate_number and (rate < 0 or rate >= 1):
                    raise ValueError(
                        "rate must be a scalar tensor or a float in the "
                        "range [0, 1), got %g" % rate)
                x = ops.convert_to_tensor(inputs, name="x")
                x_dtype = x.dtype
                if not x_dtype.is_floating:
                    raise ValueError(
                        "x has to be a floating point tensor since it's going "
                        "to be scaled. Got a %s tensor instead." % x_dtype)
                is_executing_eagerly = context.executing_eagerly()
                if not tensor_util.is_tensor(rate):
                    if is_rate_number:
                        keep_prob = 1 - rate
                        scale = 1 / keep_prob
                        scale = ops.convert_to_tensor(scale, dtype=x_dtype)
                        ret = gen_math_ops.mul(x, scale)
                    else:
                        raise ValueError(
                            "rate is neither scalar nor scalar tensor %r" %
                            rate)
                else:
                    rate.get_shape().assert_has_rank(0)
                    rate_dtype = rate.dtype
                    if rate_dtype != x_dtype:
                        if not rate_dtype.is_compatible_with(x_dtype):
                            raise ValueError(
                                "Tensor dtype %s is incomptaible with Tensor dtype %s: %r"
                                % (x_dtype.name, rate_dtype.name, rate))
                        rate = gen_math_ops.cast(rate, x_dtype, name="rate")
                    one_tensor = constant_op.constant(1, dtype=x_dtype)
                    ret = gen_math_ops.real_div(
                        x, gen_math_ops.sub(one_tensor, rate))

                noise_shape = nn_ops._get_noise_shape(x, noise_shape)
                # Sample a uniform distribution on [0.0, 1.0) and select values larger
                # than rate.
                #
                # NOTE: Random uniform can only generate 2^23 distinct floats, on
                # [1.0, 2.0), and then subtracts 1.0 to land on [0.0, 1.0).
                random_tensor = random_ops.random_uniform(noise_shape,
                                                          seed=seed,
                                                          dtype=x_dtype)
                # NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
                # hence a >= comparison is used.
                keep_mask = random_tensor >= rate
                ret = gen_math_ops.mul(ret,
                                       gen_math_ops.cast(keep_mask, x_dtype))
                if not is_executing_eagerly:
                    ret.set_shape(x.get_shape())
                return ret, keep_mask
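As a rough NumPy sketch of the arithmetic this block performs (a hypothetical stand-in, not the TensorFlow ops themselves): sample a uniform tensor, keep entries where it is `>= rate`, and scale the kept values by `1 / (1 - rate)` so the expected value of the output matches the input.

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 3)).astype(np.float32)
rate = 0.3                                    # drop probability, must be in [0, 1)

random_tensor = rng.uniform(size=x.shape).astype(np.float32)
keep_mask = random_tensor >= rate             # True where a value survives
ret = (x / (1.0 - rate)) * keep_mask          # rescale so E[ret] == x elementwise
```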
Example 4
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
    """Returns a mask tensor representing the first N positions of each cell.
	If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
	dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with
	```
	mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
	```
	Examples:
	```python
	tf.sequence_mask([1, 3, 2], 5)  # [[True, False, False, False, False],
																	#  [True, True, True, False, False],
																	#  [True, True, False, False, False]]
	tf.sequence_mask([[1, 3],[2,0]])  # [[[True, False, False],
																		#   [True, True, True]],
																		#  [[True, True, False],
																		#   [False, False, False]]]
	```
	Args:
		lengths: integer tensor, all its values <= maxlen.
		maxlen: scalar integer tensor, size of last dimension of returned tensor.
			Default is the maximum value in `lengths`.
		dtype: output type of the resulting tensor.
		name: name of the op.
	Returns:
		A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.
	Raises:
		ValueError: if `maxlen` is not a scalar.
	"""
    with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
        lengths = ops.convert_to_tensor(lengths)

        if maxlen is None:
            maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
            maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)
        else:
            maxlen = ops.convert_to_tensor(maxlen)
        if (maxlen.get_shape().ndims is not None and
                maxlen.get_shape().ndims != 0):
            raise ValueError("maxlen must be scalar for sequence_mask")

        # The basic idea is to compare a range row vector of size maxlen:
        # [0, 1, 2, 3, 4]
        # to length as a matrix with 1 column: [[1], [3], [2]].
        # Because of broadcasting on both arguments this comparison results
        # in a matrix of size (len(lengths), maxlen)
        row_vector = gen_math_ops._range(constant(0, maxlen.dtype), maxlen,
                                         constant(1, maxlen.dtype))
        # Since maxlen >= max(lengths), it is safe to use maxlen as a cast
        # authoritative type. Whenever maxlen fits into tf.int32, so do the lengths.
        matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
        result = row_vector < matrix

        if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
            return result
        else:
            return gen_math_ops.cast(result, dtype)
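A small NumPy illustration of the broadcasting trick described in the comments above; it mirrors the logic, it is not the TensorFlow code path:

```python
import numpy as np

lengths = np.array([1, 3, 2])
maxlen = 5
row_vector = np.arange(maxlen)     # [0, 1, 2, 3, 4]
matrix = lengths[:, None]          # [[1], [3], [2]]
mask = row_vector < matrix         # broadcast to shape (3, 5)
# [[ True, False, False, False, False],
#  [ True,  True,  True, False, False],
#  [ True,  True, False, False, False]]
```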
Example 5
def zeros_like(tensor, dtype=None, name=None):
    """Creates a tensor with all elements set to zero.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to zero. Optionally,
  you can use `dtype` to specify a new type for the returned tensor.

  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
    with ops.op_scope([tensor], name, "zeros_like") as name:
        tensor = ops.convert_to_tensor(tensor, name="tensor")
        ret = gen_array_ops._zeros_like(tensor)
        if (dtype is not None) and (tensor.dtype != dtype):
            ret = gen_math_ops.cast(ret, dtype)
        return ret
Example 6
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
    """Returns the number of elements of `input` as a 0-D tensor of `out_type`."""
    if context.executing_eagerly() and not isinstance(
            input,
        (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
        input = ops.convert_to_tensor(input)
        np_out_type = out_type.as_numpy_dtype
        num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)
        return ops.convert_to_tensor(num_elements, dtype=out_type)
    with ops.name_scope(name, "Size", [input]) as name:
        if isinstance(
                input,
            (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
            return gen_math_ops.prod(gen_math_ops.cast(input.dense_shape,
                                                       out_type),
                                     0,
                                     name=name)
        else:
            input_tensor = ops.convert_to_tensor(input)
            input_shape = input_tensor.get_shape()
            if optimize:
                if input_shape.is_fully_defined():
                    return constant(input_shape.num_elements(),
                                    out_type,
                                    name=name)
                if input_shape.dims and any(dim == 0
                                            for dim in input_shape.dims):
                    return constant(0, out_type, name=name)
            return gen_array_ops.size(input, name=name, out_type=out_type)
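In the eager branch above, the element count is computed on the host as the product of the static shape; a hedged NumPy equivalent (with `shape_tuple` standing in for `input._shape_tuple()`):

```python
import numpy as np

shape_tuple = (2, 3, 4)                               # stands in for input._shape_tuple()
num_elements = np.prod(shape_tuple, dtype=np.int32)   # 24, the total element count
assert np.prod((2, 0, 4), dtype=np.int32) == 0        # any zero dimension gives size 0
```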
Example 7
def zeros_like(tensor, dtype=None, name=None):
  """Creates a tensor with all elements set to zero.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to zero. Optionally,
  you can use `dtype` to specify a new type for the returned tensor.

  For example:

  ```python
  # 'tensor' is [[1, 2, 3], [4, 5, 6]]
  tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  with ops.op_scope([tensor], name, "zeros_like") as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    ret = gen_array_ops._zeros_like(tensor)
    if (dtype is not None) and (tensor.dtype != dtype):
      ret = gen_math_ops.cast(ret, dtype)
    return ret
Example 8
 def call(self, inputs):
     inputs = ops.convert_to_tensor_v2(inputs, dtype=self.dtype)
     inputs = gen_math_ops.cast(inputs, dtypes.float32)
     kernel = (1.0 / self.kernel_scale) * self.unscaled_kernel
     outputs = gen_math_ops.mat_mul(inputs, kernel)
     outputs = nn.bias_add(outputs, self.bias)
     return gen_math_ops.cos(outputs)
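A self-contained NumPy sketch of the feature map this `call` computes, `cos(inputs @ kernel + bias)` with the kernel rescaled by `1 / kernel_scale`; the shapes and the value of `kernel_scale` below are illustrative assumptions, not values from the layer:

```python
import numpy as np

rng = np.random.default_rng(42)
inputs = rng.standard_normal((8, 16)).astype(np.float32)            # batch of 8 inputs
unscaled_kernel = rng.standard_normal((16, 64)).astype(np.float32)  # random projection
bias = rng.uniform(0.0, 2.0 * np.pi, size=64).astype(np.float32)
kernel_scale = 2.0                                                   # assumed positive scalar

kernel = (1.0 / kernel_scale) * unscaled_kernel
outputs = np.cos(inputs @ kernel + bias)                             # shape (8, 64)
```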
Example 9
def convert_to_int_tensor(tensor, name, dtype=dtypes.int32):
  """Converts the given value to an integer Tensor."""
  tensor = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype)
  if tensor.dtype.is_integer:
    tensor = gen_math_ops.cast(tensor, dtype)
  else:
    raise TypeError("%s must be an integer tensor; dtype=%s" %
                    (name, tensor.dtype))
  return tensor
Example 10
    def test_skipped_ops(self):
        with context.eager_mode():
            x = constant_op.constant(np.ones((1, 1, 1, 1)).astype(np.float32))

            # Cast is on the hardcoded list of ops to skip
            gen_math_ops.cast(x, dtypes.float64)
            self.assertEmpty(self._get_new_node_defs())

            gen_nn_ops.conv2d(x, x, [1, 1, 1, 1], 'SAME')
            y = constant_op.constant(np.zeros((1, 1, 1, 1)).astype(np.float32))
            # Duplicate ops are skipped, even if input values are different
            gen_nn_ops.conv2d(x, y, [1, 1, 1, 1], 'SAME')
            self.assertLen(self._get_new_node_defs(), 1)

            x = constant_op.constant(
                np.ones((1, 1, 1, 1, 1, 1)).astype(np.float32))
            paddings = constant_op.constant(np.ones((6, 2)).astype(np.int32))
            constant_values = constant_op.constant(0.)
            # If a host int32 input has more than 10 elements, the op is skipped
            gen_array_ops.pad_v2(x, paddings, constant_values)
            self.assertEmpty(self._get_new_node_defs())
Example 11
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
    """Returns the shape of `input` as a 1-D tensor of `out_type`."""
    with ops.name_scope(name, "Shape", [input]) as name:
        if isinstance(
                input,
            (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
            return gen_math_ops.cast(input.dense_shape, out_type)
        else:
            if not context.executing_eagerly():
                input_tensor = ops.convert_to_tensor(input)
                input_shape = input_tensor.get_shape()
                if optimize and input_shape.is_fully_defined():
                    return constant(input_shape.as_list(), out_type, name=name)
            return gen_array_ops.shape(input, name=name, out_type=out_type)
Example 12
  def call(self, inputs):
    def _bucketize_op(bins):
      bins = [gen_math_ops.cast(bins, dtypes.float32)]
      return lambda inputs: boosted_trees_ops.boosted_trees_bucketize(  # pylint: disable=g-long-lambda
          float_values=[gen_math_ops.cast(inputs, dtypes.float32)],
          bucket_boundaries=bins)[0]

    if tf_utils.is_ragged(inputs):
      integer_buckets = ragged_functional_ops.map_flat_values(
          _bucketize_op(array_ops.squeeze(self.bins)),
          inputs)
      # Ragged map_flat_values doesn't touch the non-values tensors in the
      # ragged composite tensor. If this op is the only op in a Keras model,
      # this can cause errors in Graph mode, so wrap the tensor in an identity.
      return array_ops.identity(integer_buckets)
    elif isinstance(inputs, sparse_tensor.SparseTensor):
      integer_buckets = boosted_trees_ops.boosted_trees_bucketize(
          [gen_math_ops.cast(inputs.values, dtypes.float32)],
          bucket_boundaries=[gen_math_ops.cast(array_ops.squeeze(self.bins),
                                               dtypes.float32)])[0]
      return sparse_tensor.SparseTensor(
          indices=array_ops.identity(inputs.indices),
          values=integer_buckets,
          dense_shape=array_ops.identity(inputs.dense_shape))
    else:
      input_shape = inputs.get_shape()
      if any(dim is None for dim in input_shape.as_list()[1:]):
        raise NotImplementedError(
            "Discretization Layer requires known non-batch shape,"
            "found {}".format(input_shape))

      reshaped = array_ops.reshape(
          inputs, [-1, gen_math_ops.prod(input_shape.as_list()[1:], axis=0)])

      return array_ops.reshape(
          control_flow_ops.vectorized_map(
              _bucketize_op(array_ops.squeeze(self.bins)), reshaped),
          array_ops.constant([-1] + input_shape.as_list()[1:]))
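A rough NumPy analogue of the bucketization step (`np.digitize` is used here as a stand-in; its boundary handling may differ slightly from `boosted_trees_bucketize`):

```python
import numpy as np

bins = np.array([0.0, 1.0, 2.0], dtype=np.float32)         # bucket boundaries
values = np.array([-0.5, 0.3, 1.7, 2.4], dtype=np.float32)
buckets = np.digitize(values, bins)                         # bucket ids 0..len(bins)
print(buckets)                                              # [0 1 2 3]
```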
Example 13
def batch_scatter(indices, updates, shape, name=None):
    """Generalization of `tf.scatter_nd` to `indices` with leading batch dimensions."""
    with ops.name_scope(name):
        indices = ops.convert_to_tensor(indices, name="indices")
        indices_shape = array_ops.shape(indices)
        indices_dimensions = indices.get_shape().ndims

        if indices_dimensions is None:
            raise ValueError(
                "batch_gather does not allow indices with unknown "
                "shape.")

        nd_indices = array_ops.expand_dims(indices, axis=-1)
        nd_indices_list = []

        # Scatter ND requires indices to have an additional dimension, in which the
        # coordinates of the updated things are specified. For this to be adapted to
        # the scatter_update with several leading dimensions, we simply make use of
        # a tf.range for all the leading dimensions followed by concat of all the
        # coordinates we created with the original indices.

        # For example if indices.shape = [2, 3, 4], we should generate the following
        # indices for tf.scatter_nd_update:
        # nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]]
        # nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]]
        # nd_indices[:, :, 2] = indices
        for dimension in range(indices_dimensions - 1):
            # In this loop we generate the following for the example (one for each
            # iteration).
            # nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]]
            # nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]]
            # This is done at every iteration with a tf.range over the size of the
            # i-th dimension and using broadcasting over the desired shape.
            dimension_size = indices_shape[dimension]
            shape_to_broadcast = [1] * (indices_dimensions + 1)
            shape_to_broadcast[dimension] = dimension_size
            dimension_range = array_ops.reshape(
                gen_math_ops._range(0, dimension_size, 1), shape_to_broadcast)
            if dimension_range.dtype.base_dtype != nd_indices.dtype:
                dimension_range = gen_math_ops.cast(dimension_range,
                                                    nd_indices.dtype)
            nd_indices_list.append(dimension_range *
                                   array_ops.ones_like(nd_indices))
        # Add the original indices at the end, as described above, and concat.
        nd_indices_list.append(nd_indices)
        final_indices = array_ops.concat(nd_indices_list, axis=-1)
        return tf.scatter_nd(final_indices, updates, shape)
Example 14
def mpc_cast(x, dtype, name=None):
    """Casts a tensor from float64 to a float64 type, only for BP

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
      be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
      `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
      `bfloat16`.
    dtype: The destination type. The list of supported dtypes is the same as
      `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
      same type as `dtype`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """

    base_type = dtypes.as_dtype(dtype).base_dtype

    with ops.name_scope(name, "Cast", [x]) as name:
        if isinstance(x, sparse_tensor.SparseTensor):
            values_cast = tf.cast(x.values, base_type, name=name)
            x = sparse_tensor.SparseTensor(x.indices, values_cast,
                                           x.dense_shape)
        elif isinstance(x, ops.IndexedSlices):
            values_cast = tf.cast(x.values, base_type, name=name)
            x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)
        else:
            # TODO(josh11b): If x is not already a Tensor, we could return
            # ops.convert_to_tensor(x, dtype=dtype, ...)  here, but that
            # allows some conversions that cast() can't do, e.g. casting numbers to
            # strings.
            x = ops.convert_to_tensor(x, name="x")
            x = gen_math_ops.cast(x, base_type, name=name)

        return x
Example 15
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor`) to `dtype`.

  For example:

  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float32
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  with ops.op_scope([x], name, "Cast") as name:
    if isinstance(x, ops.SparseTensor):
      values_cast = cast(x.values, dtype, name=name)
      return ops.SparseTensor(x.indices, values_cast, x.shape)
    else:
      # TODO(touts): Handle what Josh said.
      #
      # Could return ops.convert_to_tensor(x, dtype=dtype, ...)  here, but that
      # allows some conversions that cast() can't do, e.g.  casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype.base_dtype == dtype:
        return x
      return gen_math_ops.cast(x, dtype, name=name)
Example 16
 def call(self, inputs):
   inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
   inputs = gen_math_ops.cast(inputs, dtypes.float32)
   outputs = gen_math_ops.mat_mul(inputs, self.kernel)
   outputs = nn.bias_add(outputs, self.bias)
   return gen_math_ops.cos(outputs)
Example 17
 def call(self, inputs):
   inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
   inputs = gen_math_ops.cast(inputs, dtypes.float32)
   outputs = gen_math_ops.mat_mul(inputs, self.kernel)
   outputs = nn.bias_add(outputs, self.bias)
   return gen_math_ops.cos(outputs)
Example 18
def batch_scatter_update(ref, indices, updates, use_locking=True, name=None):
    """Generalization of `tf.scatter_update` to axis different than 0.

  Analogous to `batch_gather`. This assumes that `ref`, `indices` and `updates`
  have a series of leading dimensions that are the same for all of them, and the
  updates are performed on the last dimension of indices. In other words, the
  dimensions should be the following:

  `num_prefix_dims = indices.ndims - 1`
  `batch_dim = num_prefix_dims + 1`
  `updates.shape = indices.shape + var.shape[batch_dim:]`

  where

  `updates.shape[:num_prefix_dims]`
  `== indices.shape[:num_prefix_dims]`
  `== var.shape[:num_prefix_dims]`

  And the operation performed can be expressed as:

  `var[i_1, ..., i_n, indices[i_1, ..., i_n, j]] = updates[i_1, ..., i_n, j]`

  When indices is a 1D tensor, this operation is equivalent to
  `tf.scatter_update`.

  To avoid this operation there are two alternatives:
  1) Reshaping the variable by merging the first `ndims` dimensions. However,
     this is not possible because `tf.reshape` returns a Tensor, which we
     cannot use `tf.scatter_update` on.
  2) Looping over the first `ndims` of the variable and using
     `tf.scatter_update` on the subtensors that result from slicing the first
     dimension. This is a valid option for `ndims = 1`, but less efficient than
     this implementation.

  See also `tf.scatter_update` and `tf.scatter_nd_update`.

  Args:
    ref: `Variable` to scatter onto.
    indices: Tensor containing indices as described above.
    updates: Tensor of updates to apply to `ref`.
    use_locking: Boolean indicating whether to lock the writing operation.
    name: Optional scope name string.

  Returns:
    Ref to `variable` after it has been modified.

  Raises:
    ValueError: If the initial `ndims` of `ref`, `indices`, and `updates` are
        not the same.
  """
    with ops.name_scope(name):
        indices = ops.convert_to_tensor(indices, name="indices")
        indices_shape = array_ops.shape(indices)
        indices_dimensions = indices.get_shape().ndims

        if indices_dimensions is None:
            raise ValueError(
                "batch_gather does not allow indices with unknown "
                "shape.")

        nd_indices = array_ops.expand_dims(indices, axis=-1)
        nd_indices_list = []

        # Scatter ND requires indices to have an additional dimension, in which the
        # coordinates of the updated things are specified. For this to be adapted to
        # the scatter_update with several leading dimensions, we simply make use of
        # a tf.range for all the leading dimensions followed by concat of all the
        # coordinates we created with the original indices.

        # For example if indices.shape = [2, 3, 4], we should generate the following
        # indices for tf.scatter_nd_update:
        # nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]]
        # nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]]
        # nd_indices[:, :, 2] = indices
        for dimension in range(indices_dimensions - 1):
            # In this loop we generate the following for the example (one for each
            # iteration).
            # nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]]
            # nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]]
            # This is done at every iteration with a tf.range over the size of the
            # i-th dimension and using broadcasting over the desired shape.
            dimension_size = indices_shape[dimension]
            shape_to_broadcast = [1] * (indices_dimensions + 1)
            shape_to_broadcast[dimension] = dimension_size
            dimension_range = array_ops.reshape(
                gen_math_ops._range(0, dimension_size, 1), shape_to_broadcast)
            if dimension_range.dtype.base_dtype != nd_indices.dtype:
                dimension_range = gen_math_ops.cast(dimension_range,
                                                    nd_indices.dtype)
            nd_indices_list.append(dimension_range *
                                   array_ops.ones_like(nd_indices))
        # Add the original indices at the end, as described above, and concat.
        nd_indices_list.append(nd_indices)
        final_indices = array_ops.concat(nd_indices_list, axis=-1)
        return scatter_nd_update(ref,
                                 final_indices,
                                 updates,
                                 use_locking=use_locking)
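A NumPy sketch of the `nd_indices` construction described in the comments above, for toy 2-D `indices`; it mirrors the loop, not the TensorFlow ops:

```python
import numpy as np

indices = np.array([[0, 2, 1],
                    [1, 0, 2]])                  # shape (2, 3): per-row positions
nd_indices = indices[..., None]                  # add the trailing coordinate axis

nd_indices_list = []
for dimension in range(indices.ndim - 1):        # here only dimension 0
    size = indices.shape[dimension]
    shape_to_broadcast = [1] * (indices.ndim + 1)
    shape_to_broadcast[dimension] = size
    dimension_range = np.arange(size).reshape(shape_to_broadcast)
    nd_indices_list.append(dimension_range * np.ones_like(nd_indices))
nd_indices_list.append(nd_indices)
final_indices = np.concatenate(nd_indices_list, axis=-1)   # shape (2, 3, 2)

# final_indices[i, j] == [i, indices[i, j]]: the leading batch coordinate plus
# the within-row position, which is what scatter_nd_update expects.
print(final_indices[1, 2])                       # [1 2]
```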
Example 19
 def _bucketize_op(bins):
   bins = [gen_math_ops.cast(bins, dtypes.float32)]
   return lambda inputs: boosted_trees_ops.boosted_trees_bucketize(  # pylint: disable=g-long-lambda
       float_values=[gen_math_ops.cast(inputs, dtypes.float32)],
       bucket_boundaries=bins)[0]
Example 20
def batch_gather(params, indices, axis, name=None):
    """
    Extension of the batch_gather function in tensorflow
    (see https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/ops/array_ops.py
    or https://www.tensorflow.org/api_docs/python/tf/batch_gather)
    Gather slices from `params` according to `indices` with leading batch dims.
    This operation assumes that the leading dimensions of `indices` are dense,
    and gathers on the axis corresponding to the last dimension of `indices`.
    More concretely it computes:
    `result[i1, ..., in, j1, ..., jm, k1, ...., kl] = params[i1, ..., in, indices[i1, ..., in, j1, ..., jm], k1, ..., kl]`
    Therefore `params` should be a Tensor of shape [A1, ..., AN, C0, B1, ..., BM],
    `indices` should be a Tensor of shape [A1, ..., AN, C1, ..., CK] and `result` will be
    a Tensor of size `[A1, ..., AN, C1, ..., CK, B1, ..., BM]`.
    In the case in which indices is a 1D tensor, this operation is equivalent to
    `tf.gather`.
    See also `tf.gather` and `tf.gather_nd`.
    Args:
      params: A `Tensor`. The tensor from which to gather values.
      indices: A `Tensor`. Must be one of the following types: int32, int64. Index
          tensor. Must be in range `[0, params.shape[axis])`, where `axis` is the
          last dimension of `indices` itself.
      axis: A `Tensor`. Must be one of the following types: int32, int64. The axis
            in `params` to gather `indices` from.
      name: A name for the operation (optional).
    Returns:
      A Tensor. Has the same type as `params`.
    Raises:
      ValueError: if `indices` has an unknown shape.
    """

    with ops.name_scope(name):
        indices = ops.convert_to_tensor(indices, name="indices")
        params = ops.convert_to_tensor(params, name="params")
        indices_shape = tf.shape(indices)
        params_shape = tf.shape(params)

        ndims = indices.shape.ndims
        if ndims is None:
            raise ValueError(
                "batch_gather does not allow indices with unknown "
                "shape.")
        batch_indices = indices
        indices_dtype = indices.dtype.base_dtype
        accum_dim_value = tf.ones((), dtype=indices_dtype)
        # Use correct type for offset index computation
        casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
        for dim in range(axis, 0, -1):
            dim_value = casted_params_shape[dim - 1]
            accum_dim_value *= casted_params_shape[dim]
            start = tf.zeros((), dtype=indices_dtype)
            step = tf.ones((), dtype=indices_dtype)
            dim_indices = gen_math_ops._range(start, dim_value, step)
            dim_indices *= accum_dim_value
            dim_shape = tf.stack([1] * (dim - 1) + [dim_value] + [1] *
                                 (ndims - dim),
                                 axis=0)
            batch_indices += tf.reshape(dim_indices, dim_shape)

        flat_inner_shape_indices = gen_math_ops.prod(
            indices_shape[:(axis + 1)], [0], False)
        flat_indices = tf.reshape(
            batch_indices,
            tf.concat([[flat_inner_shape_indices], indices_shape[(axis + 1):]],
                      axis=0))
        outer_shape = params_shape[(axis + 1):]
        flat_inner_shape_params = gen_math_ops.prod(params_shape[:(axis + 1)],
                                                    [0], False)

        flat_params = tf.reshape(
            params, tf.concat([[flat_inner_shape_params], outer_shape],
                              axis=0))
        flat_result = tf.gather(flat_params, flat_indices)
        result = tf.reshape(flat_result,
                            tf.concat([indices_shape, outer_shape], axis=0))
        final_shape = indices.get_shape()[:axis].merge_with(
            params.get_shape()[:axis])
        final_shape = final_shape.concatenate(indices.get_shape()[axis:])
        final_shape = final_shape.concatenate(params.get_shape()[(axis + 1):])
        result.set_shape(final_shape)
        return result
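For the simple case `axis == indices.shape.ndims - 1 == 1`, the flatten-and-offset gather above reduces to the following NumPy computation (`np.take_along_axis` is used only as an independent cross-check; it is not what the function calls):

```python
import numpy as np

params = np.array([[10, 20, 30],
                   [40, 50, 60]])                # shape (2, 3)
indices = np.array([[2, 0],
                    [1, 1]])                     # shape (2, 2): per-batch positions

# Add row * row_length to each index, then gather from the flattened params.
offsets = np.arange(params.shape[0])[:, None] * params.shape[1]
result = params.reshape(-1)[indices + offsets]
# [[30 10]
#  [50 50]]

assert np.array_equal(result, np.take_along_axis(params, indices, axis=1))
```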
Example 21
def batch_scatter_update(ref, indices, updates, use_locking=True, name=None):
  """Generalization of `tf.scatter_update` to axis different than 0.

  Analogous to `batch_gather`. This assumes that `ref`, `indices` and `updates`
  have a series of leading dimensions that are the same for all of them, and the
  updates are performed on the last dimension of indices. In other words, the
  dimensions should be the following:

  `num_prefix_dims = indices.ndims - 1`
  `batch_dim = num_prefix_dims + 1`
  `updates.shape = indices.shape + var.shape[batch_dim:]`

  where

  `updates.shape[:num_prefix_dims]`
  `== indices.shape[:num_prefix_dims]`
  `== var.shape[:num_prefix_dims]`

  And the operation performed can be expressed as:

  `var[i_1, ..., i_n, indices[i_1, ..., i_n, j]] = updates[i_1, ..., i_n, j]`

  When indices is a 1D tensor, this operation is equivalent to
  `tf.scatter_update`.

  To avoid this operation there are two alternatives:
  1) Reshaping the variable by merging the first `ndims` dimensions. However,
     this is not possible because `tf.reshape` returns a Tensor, which we
     cannot use `tf.scatter_update` on.
  2) Looping over the first `ndims` of the variable and using
     `tf.scatter_update` on the subtensors that result from slicing the first
     dimension. This is a valid option for `ndims = 1`, but less efficient than
     this implementation.

  See also `tf.scatter_update` and `tf.scatter_nd_update`.

  Args:
    ref: `Variable` to scatter onto.
    indices: Tensor containing indices as described above.
    updates: Tensor of updates to apply to `ref`.
    use_locking: Boolean indicating whether to lock the writing operation.
    name: Optional scope name string.

  Returns:
    Ref to `variable` after it has been modified.

  Raises:
    ValueError: If the initial `ndims` of `ref`, `indices`, and `updates` are
        not the same.
  """
  with ops.name_scope(name):
    indices = ops.convert_to_tensor(indices, name="indices")
    indices_shape = array_ops.shape(indices)
    indices_dimensions = indices.get_shape().ndims

    if indices_dimensions is None:
      raise ValueError("batch_gather does not allow indices with unknown "
                       "shape.")

    nd_indices = array_ops.expand_dims(indices, axis=-1)
    nd_indices_list = []

    # Scatter ND requires indices to have an additional dimension, in which the
    # coordinates of the updated things are specified. For this to be adapted to
    # the scatter_update with several leading dimensions, we simply make use of
    # a tf.range for all the leading dimensions followed by concat of all the
    # coordinates we created with the original indices.

    # For example if indices.shape = [2, 3, 4], we should generate the following
    # indices for tf.scatter_nd_update:
    # nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]]
    # nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]]
    # nd_indices[:, :, 2] = indices
    for dimension in range(indices_dimensions - 1):
      # In this loop we generate the following for the example (one for each
      # iteration).
      # nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]]
      # nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]]
      # This is done at every iteration with a tf.range over the size of the
      # i-th dimension and using broadcasting over the desired shape.
      dimension_size = indices_shape[dimension]
      shape_to_broadcast = [1] * (indices_dimensions + 1)
      shape_to_broadcast[dimension] = dimension_size
      dimension_range = array_ops.reshape(
          gen_math_ops._range(0, dimension_size, 1), shape_to_broadcast)
      if dimension_range.dtype.base_dtype != nd_indices.dtype:
        dimension_range = gen_math_ops.cast(dimension_range, nd_indices.dtype)
      nd_indices_list.append(
          dimension_range * array_ops.ones_like(nd_indices))
    # Add the original indices at the end, as described above, and concat.
    nd_indices_list.append(nd_indices)
    final_indices = array_ops.concat(nd_indices_list, axis=-1)
    return scatter_nd_update(
        ref, final_indices, updates, use_locking=use_locking)