Example No. 1
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops


# `_GetBatchIndices` and `_IndexedSlicesToTensorNoWarning` are private helpers
# defined alongside this function in tensorflow/python/ops/array_grad.py.
def _BatchGatherGrad(params_shape, values, indices, batch_dims,
                     gather_dim_size):
    """Returns the gradient of GatherV2 with batch dimensions."""

    # Axis is the first non-batch dimension.
    indices_size = array_ops.expand_dims(array_ops.size(indices), 0)
    if batch_dims:
        values_shape = array_ops.shape(values)
        # Add the batch offsets to indices and flatten the batch dimensions.
        outer_shape = values_shape[:batch_dims]
        inner_shape = values_shape[batch_dims:][1:]
        batch_size = gen_math_ops.prod(outer_shape, [0], False)
        flat_values_shape = array_ops.concat([[-1], inner_shape], 0)
        gather_dim_size *= batch_size

        indices = _GetBatchIndices(params_shape, indices, batch_dims)
        values = array_ops.reshape(_IndexedSlicesToTensorNoWarning(values),
                                   flat_values_shape)

    indices = array_ops.reshape(indices, indices_size)
    params_grad = math_ops.unsorted_segment_sum(values, indices,
                                                gather_dim_size)

    if batch_dims:
        # Put back the batch dimensions.
        params_grad = array_ops.reshape(
            params_grad, array_ops.concat([outer_shape, flat_values_shape], 0))

    return params_grad
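A minimal sketch of the idea behind this gradient, using only public TF 2.x APIs on made-up toy values: the backward pass of a gather is a scatter-add of the upstream gradient into the rows that were gathered, which is exactly what `unsorted_segment_sum` computes above.

import tensorflow as tf

params = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
indices = tf.constant([2, 0, 2])
upstream = tf.ones([3, 2])  # gradient flowing into the gathered values

# Row 2 was gathered twice, so it accumulates two upstream rows.
grad = tf.math.unsorted_segment_sum(upstream, indices,
                                    num_segments=tf.shape(params)[0])
print(grad)  # [[1. 1.], [0. 0.], [2. 2.]]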
Example No. 2
import numpy as np

from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops


def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
    """Returns the number of elements of `input` as a scalar of `out_type`."""
    if context.executing_eagerly() and not isinstance(
            input,
        (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
        input = ops.convert_to_tensor(input)
        np_out_type = out_type.as_numpy_dtype
        num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)
        return ops.convert_to_tensor(num_elements, dtype=out_type)
    with ops.name_scope(name, "Size", [input]) as name:
        if isinstance(
                input,
            (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
            return gen_math_ops.prod(gen_math_ops.cast(input.dense_shape,
                                                       out_type),
                                     0,
                                     name=name)
        else:
            input_tensor = ops.convert_to_tensor(input)
            input_shape = input_tensor.get_shape()
            if optimize:
                if input_shape.is_fully_defined():
                    return constant(input_shape.num_elements(),
                                    out_type,
                                    name=name)
                if input_shape.dims and any(dim == 0
                                            for dim in input_shape.dims):
                    return constant(0, out_type, name=name)
            return gen_array_ops.size(input, name=name, out_type=out_type)
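For context, this helper backs the public `tf.size`. A short usage sketch (toy values, TF 2.x eager mode assumed):

import tensorflow as tf

x = tf.zeros([2, 3, 4])
print(tf.size(x).numpy())  # 24, folded to a constant since the shape is static

# Per the sparse branch above, the size of a SparseTensor is the product of
# its dense_shape, not the number of stored values.
st = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[5, 6])
print(tf.size(st).numpy())  # 30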
Example No. 3
import tensorflow as tf
from tensorflow.python.ops import gen_math_ops


def batch_gather(params, indices, name=None):
    """Gather slices from `params` according to `indices` with leading batch dims.
    This operation assumes that the leading dimensions of `indices` are dense,
    and it gathers on the axis corresponding to the last dimension of `indices`.
    More concretely it computes:
    result[i1, ..., in] = params[i1, ..., in-1, indices[i1, ..., in]]
    Therefore `params` should be a Tensor of shape [A1, ..., AN, B1, ..., BM],
    `indices` should be a Tensor of shape [A1, ..., AN-1, C] and `result` will be
    a Tensor of shape `[A1, ..., AN-1, C, B1, ..., BM]`.
    In the case in which indices is a 1D tensor, this operation is equivalent to
    `tf.gather`.
    See also `tf.gather` and `tf.gather_nd`.
    Args:
      params: A Tensor. The tensor from which to gather values.
      indices: A Tensor. Must be one of the following types: int32, int64. Index
          tensor. Must be in range `[0, params.shape[axis])`, where `axis` is the
          last dimension of `indices` itself.
      name: A name for the operation (optional).
    Returns:
      A Tensor. Has the same type as `params`.
    Raises:
      ValueError: if `indices` has an unknown shape.
    """

    with tf.name_scope(name or "batch_gather"):
        indices = tf.convert_to_tensor(indices, name="indices")
        params = tf.convert_to_tensor(params, name="params")
        indices_shape = tf.shape(indices)
        params_shape = tf.shape(params)
        ndims = indices.shape.ndims
        if ndims is None:
            raise ValueError("batch_gather does not allow indices with unknown "
                             "shape.")
        batch_indices = indices
        # Cast the shape to the index dtype; otherwise the offset arithmetic
        # below fails when `indices` is int64 (`tf.shape` returns int32).
        casted_params_shape = tf.cast(params_shape, indices.dtype.base_dtype)
        accum_dim_value = 1
        for dim in range(ndims-1, 0, -1):
            dim_value = casted_params_shape[dim-1]
            accum_dim_value *= casted_params_shape[dim]
            dim_indices = tf.range(0, dim_value, 1)
            dim_indices *= accum_dim_value
            dim_shape = tf.stack([1] * (dim - 1) + [dim_value] + [1] * (ndims - dim),
                                 axis=0)
            batch_indices += tf.reshape(dim_indices, dim_shape)

        flat_indices = tf.reshape(batch_indices, [-1])
        outer_shape = params_shape[ndims:]
        flat_inner_shape = gen_math_ops.prod(
            params_shape[:ndims], [0], False)

        flat_params = tf.reshape(
            params, tf.concat([[flat_inner_shape], outer_shape], axis=0))
        flat_result = tf.gather(flat_params, flat_indices)
        result = tf.reshape(flat_result, tf.concat(
            [indices_shape, outer_shape], axis=0))
        final_shape = indices.get_shape()[:ndims-1].merge_with(
            params.get_shape()[:ndims - 1])
        final_shape = final_shape.concatenate(indices.get_shape()[ndims-1])
        final_shape = final_shape.concatenate(params.get_shape()[ndims:])
        result.set_shape(final_shape)
        return result
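A hypothetical usage of the function above on toy values; in TF 2.x the same result is available as `tf.gather(params, indices, batch_dims=1)`:

params = tf.constant([[10, 11, 12],
                      [20, 21, 22]])
indices = tf.constant([[2, 0],
                       [1, 1]])

# result[i, j] = params[i, indices[i, j]]
print(batch_gather(params, indices))  # [[12, 10], [21, 21]]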
Example No. 4
    # `call` of Keras's `Discretization` preprocessing layer; `self.bins`
    # holds the bucket boundaries.
    def call(self, inputs):
        def _bucketize_op(bins):
            bins = [math_ops.cast(bins, dtypes.float32)]
            return lambda inputs: gen_boosted_trees_ops.BoostedTreesBucketize(  # pylint: disable=g-long-lambda
                float_values=[math_ops.cast(inputs, dtypes.float32)],
                bucket_boundaries=bins)[0]

        if tf_utils.is_ragged(inputs):
            integer_buckets = ragged_functional_ops.map_flat_values(
                _bucketize_op(array_ops.squeeze(self.bins)), inputs)
            # Ragged map_flat_values doesn't touch the non-values tensors in the
            # ragged composite tensor. If this op is the only op in a Keras
            # model, this can cause errors in Graph mode, so wrap the tensor in
            # an identity.
            return array_ops.identity(integer_buckets)
        elif isinstance(inputs, sparse_tensor.SparseTensor):
            integer_buckets = gen_boosted_trees_ops.BoostedTreesBucketize(
                float_values=[math_ops.cast(inputs.values, dtypes.float32)],
                bucket_boundaries=[
                    math_ops.cast(array_ops.squeeze(self.bins), dtypes.float32)
                ])[0]
            return sparse_tensor.SparseTensor(
                indices=array_ops.identity(inputs.indices),
                values=integer_buckets,
                dense_shape=array_ops.identity(inputs.dense_shape))
        else:
            input_shape = inputs.get_shape()
            if any(dim is None for dim in input_shape.as_list()[1:]):
                raise NotImplementedError(
                    "Discretization Layer requires known non-batch shape, "
                    "found {}".format(input_shape))

            reshaped = array_ops.reshape(
                inputs,
                [-1, gen_math_ops.prod(input_shape.as_list()[1:], axis=0)])

            return array_ops.reshape(
                control_flow_ops.vectorized_map(
                    _bucketize_op(array_ops.squeeze(self.bins)), reshaped),
                array_ops.constant([-1] + input_shape.as_list()[1:]))
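The core of this layer is plain bucketization. A rough public-API equivalent of the op used above, on made-up boundaries (not the layer's defaults):

import tensorflow as tf

values = tf.constant([[-1.5, 1.0, 3.4, 0.5]])
boundaries = [0.0, 1.0, 2.0]  # buckets: (-inf, 0) [0, 1) [1, 2) [2, inf)
buckets = tf.raw_ops.Bucketize(input=values, boundaries=boundaries)
print(buckets)  # [[0 2 3 1]]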
Example No. 5
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops


def batch_gather(params, indices, axis, name=None):
    """
    Extension of the batch_gather function in tensorflow
    (see https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/ops/array_ops.py
    or https://www.tensorflow.org/api_docs/python/tf/batch_gather)
    Gather slices from `params` according to `indices` with leading batch dims.
    This operation assumes that the leading dimensions of `indices` are dense,
    and it gathers on the axis corresponding to the last dimension of `indices`.
    More concretely it computes:
    `result[i1, ..., in, j1, ..., jm, k1, ...., kl] = params[i1, ..., in, indices[i1, ..., in, j1, ..., jm], k1, ..., kl]`
    Therefore `params` should be a Tensor of shape [A1, ..., AN, C0, B1, ..., BM],
    `indices` should be a Tensor of shape [A1, ..., AN, C1, ..., CK] and `result` will be
    a Tensor of shape `[A1, ..., AN, C1, ..., CK, B1, ..., BM]`.
    In the case in which indices is a 1D tensor, this operation is equivalent to
    `tf.gather`.
    See also `tf.gather` and `tf.gather_nd`.
    Args:
      params: A `Tensor`. The tensor from which to gather values.
      indices: A `Tensor`. Must be one of the following types: int32, int64.
          Index tensor. Must be in range `[0, params.shape[axis])`.
      axis: An `int`. The axis in `params` to gather `indices` from; the
            leading `axis` dimensions are treated as batch dimensions.
      name: A name for the operation (optional).
    Returns:
      A Tensor. Has the same type as `params`.
    Raises:
      ValueError: if `indices` has an unknown shape.
    """

    with ops.name_scope(name, "batch_gather"):
        indices = ops.convert_to_tensor(indices, name="indices")
        params = ops.convert_to_tensor(params, name="params")
        indices_shape = tf.shape(indices)
        params_shape = tf.shape(params)

        ndims = indices.shape.ndims
        if ndims is None:
            raise ValueError(
                "batch_gather does not allow indices with unknown "
                "shape.")
        batch_indices = indices
        indices_dtype = indices.dtype.base_dtype
        accum_dim_value = tf.ones((), dtype=indices_dtype)
        # Use correct type for offset index computation
        casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
        for dim in range(axis, 0, -1):
            dim_value = casted_params_shape[dim - 1]
            accum_dim_value *= casted_params_shape[dim]
            start = tf.zeros((), dtype=indices_dtype)
            step = tf.ones((), dtype=indices_dtype)
            dim_indices = gen_math_ops._range(start, dim_value, step)
            dim_indices *= accum_dim_value
            dim_shape = tf.stack([1] * (dim - 1) + [dim_value] + [1] *
                                 (ndims - dim),
                                 axis=0)
            batch_indices += tf.reshape(dim_indices, dim_shape)

        flat_inner_shape_indices = gen_math_ops.prod(
            indices_shape[:(axis + 1)], [0], False)
        flat_indices = tf.reshape(
            batch_indices,
            tf.concat([[flat_inner_shape_indices], indices_shape[(axis + 1):]],
                      axis=0))
        outer_shape = params_shape[(axis + 1):]
        flat_inner_shape_params = gen_math_ops.prod(params_shape[:(axis + 1)],
                                                    [0], False)

        flat_params = tf.reshape(
            params, tf.concat([[flat_inner_shape_params], outer_shape],
                              axis=0))
        flat_result = tf.gather(flat_params, flat_indices)
        result = tf.reshape(flat_result,
                            tf.concat([indices_shape, outer_shape], axis=0))
        final_shape = indices.get_shape()[:axis].merge_with(
            params.get_shape()[:axis])
        final_shape = final_shape.concatenate(indices.get_shape()[axis:])
        final_shape = final_shape.concatenate(params.get_shape()[(axis + 1):])
        result.set_shape(final_shape)
        return result
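A hypothetical usage with an explicit axis, on toy values; in TF 2.x this matches `tf.gather(params, indices, axis=1, batch_dims=1)`:

params = tf.reshape(tf.range(12), [2, 3, 2])  # 2 batches, gather axis of size 3
indices = tf.constant([[0, 2], [1, 0]])       # per-batch picks along axis 1
print(batch_gather(params, indices, axis=1))
# [[[0, 1], [4, 5]],
#  [[8, 9], [6, 7]]]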