Example #1
def f(x):
    # Normalized sinc: sin(pi*x) / (pi*x), with the removable singularity
    # at x == 0 patched to 1.
    pi_x = x * np.pi
    return array_ops.where_v2(x == 0, array_ops.ones_like(x),
                              math_ops.sin(pi_x) / pi_x)
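For context, a minimal sketch of the same computation against the public TensorFlow API (the `array_ops`/`math_ops` modules above are TensorFlow internals; the printed values are approximate):

import numpy as np
import tensorflow as tf

def sinc(x):
    # sin(pi*x) / (pi*x), with the 0/0 at x == 0 replaced by 1.
    pi_x = x * np.pi
    return tf.where(x == 0, tf.ones_like(x), tf.sin(pi_x) / pi_x)

print(sinc(tf.constant([0.0, 0.5, 1.0])))  # ~[1.0, 0.637, 0.0]

Note that tf.where evaluates both branches, so the discarded 0/0 branch can still produce NaN gradients at x == 0.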
Example #2
def f(x):
    # __pow__ can't handle a negative base, so take the root of `abs(x)`
    # and restore the sign with where_v2 (signed cube root).
    rt = math_ops.abs(x)**(1.0 / 3)
    return array_ops.where_v2(x < 0, -rt, rt)
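A public-API sketch of the same signed cube root (illustrative, not from the original source):

import tensorflow as tf

def cbrt(x):
    rt = tf.abs(x) ** (1.0 / 3)      # magnitude
    return tf.where(x < 0, -rt, rt)  # restore the sign

print(cbrt(tf.constant([-8.0, 27.0])))  # ~[-2.0, 3.0]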
Example #3
def f(x):
    # Round toward zero: ceil for negative inputs, floor otherwise (trunc).
    return array_ops.where_v2(x < 0, math_ops.ceil(x), math_ops.floor(x))
Example #4
def f(x1, x2):
    # Step function on x1: 0 where x1 < 0, 1 where x1 > 0, x2 where x1 == 0.
    return array_ops.where_v2(
        x1 < 0, constant_op.constant(0, dtype=x2.dtype),
        array_ops.where_v2(x1 > 0, constant_op.constant(1, dtype=x2.dtype),
                           x2))
Example #5
def f(x):
    if x.dtype in _tf_float_types:
        # The angle (argument) of a real number is pi for negatives, 0 otherwise.
        # Workaround for b/147515503
        return array_ops.where_v2(x < 0, np.pi, 0)
    else:
        return math_ops.angle(x)
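A sketch of the same workaround with the public API (`_tf_float_types` is internal to the original test; the explicit constants keep both tf.where branches the same dtype):

import numpy as np
import tensorflow as tf

def angle(x):
    # pi for negative reals, 0 otherwise; complex inputs use tf.math.angle.
    if x.dtype.is_floating:
        return tf.where(x < 0, tf.constant(np.pi, x.dtype),
                        tf.constant(0, x.dtype))
    return tf.math.angle(x)

print(angle(tf.constant([-1.0, 2.0])))  # [3.1415927, 0.]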
Example #6
def false_fn():
    # cond branch: 100 where mt.mask is set, otherwise mt.values * 2,
    # with the mask inverted in the result.
    return MaskedTensorV1(
        array_ops.where_v2(mt.mask, 100, mt.values * 2),
        math_ops.logical_not(mt.mask))
Example #7
def with_default(self, default):
    # Replace masked-out entries (mask == False) with `default`.
    return array_ops.where_v2(self.mask, self.values, default)
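A self-contained sketch of how such a method behaves; the `Masked` container below is a hypothetical stand-in, since the real `MaskedTensorV1` class is not shown on this page:

import tensorflow as tf

class Masked:
    def __init__(self, mask, values):
        self.mask, self.values = mask, values

    def with_default(self, default):
        # Keep values where the mask is True, fall back to `default` elsewhere.
        return tf.where(self.mask, self.values, default)

mt = Masked(tf.constant([True, False, True]), tf.constant([1, 2, 3]))
print(mt.with_default(0))  # [1, 0, 3]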
Example #8
def loop_fn(i):
    # Per-iteration body: pick row i from a and b, then select elementwise.
    a_i = array_ops.gather(a, i)
    b_i = array_ops.gather(b, i)
    return array_ops.where_v2(cond, a_i, b_i)
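This reads like the body of a `pfor` test; a sketch of the same pattern with the public `tf.vectorized_map` (the tensors `a`, `b`, and `cond` are assumed captured from the enclosing scope):

import tensorflow as tf

a = tf.random.normal([4, 3])
b = tf.random.normal([4, 3])
cond = tf.constant([True, False, True])  # broadcast against each gathered row

def loop_fn(i):
    a_i = tf.gather(a, i)
    b_i = tf.gather(b, i)
    return tf.where(cond, a_i, b_i)

out = tf.vectorized_map(loop_fn, tf.range(4))  # shape [4, 3]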
Example #9
def true_fn():
    # cond branch: keep values where the mask is set (-1 elsewhere) and
    # recompute the mask as values > 3.
    return MaskedTensorV1(array_ops.where_v2(mt.mask, mt.values, -1),
                          mt.values > 3)
Example #10
def norm(tensor,
         ord='euclidean',
         axis=None,
         keepdims=None,
         name=None,
         keep_dims=None):
  r"""Computes the norm of vectors, matrices, and tensors.

  This function can compute several different vector norms (the 1-norm, the
  Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and
  matrix norms (Frobenius, 1-norm, 2-norm and inf-norm).

  Args:
    tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
    ord: Order of the norm. Supported values are 'fro', 'euclidean',
      `1`, `2`, `np.inf` and any positive real number yielding the corresponding
      p-norm. Default is 'euclidean' which is equivalent to Frobenius norm if
      `tensor` is a matrix and equivalent to 2-norm for vectors.
      Some restrictions apply:
        a) The Frobenius norm `fro` is not defined for vectors,
        b) If axis is a 2-tuple (matrix norm), only 'euclidean', 'fro', `1`,
           `2`, `np.inf` are supported.
      See the description of `axis` on how to compute norms for a batch of
      vectors or matrices stored in a tensor.
    axis: If `axis` is `None` (the default), the input is considered a vector
      and a single vector norm is computed over the entire set of values in the
      tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
      `norm(reshape(tensor, [-1]), ord=ord)`.
      If `axis` is a Python integer, the input is considered a batch of vectors,
      and `axis` determines the axis in `tensor` over which to compute vector
      norms.
      If `axis` is a 2-tuple of Python integers, the input is considered a
      batch of matrices and `axis` determines the axes in `tensor` over which
      to compute a matrix norm.
      Negative indices are supported. Example: If you are passing a tensor that
      can be either a matrix or a batch of matrices at runtime, pass
      `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
      computed.
    keepdims: If True, the axes indicated in `axis` are kept with size 1.
      Otherwise, the dimensions in `axis` are removed from the output shape.
    name: The name of the op.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    output: A `Tensor` of the same type as `tensor`, containing the vector or
      matrix norms. If `keepdims` is True then the rank of output is equal to
      the rank of `tensor`. Otherwise, if `axis` is `None` the output is a
      scalar; if `axis` is an integer, the rank of `output` is one less than
      the rank of `tensor`; if `axis` is a 2-tuple, the rank of `output` is
      two less than the rank of `tensor`.

  Raises:
    ValueError: If `ord` or `axis` is invalid.

  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.norm.
  Not supported: ord <= 0, 2-norm for matrices, nuclear norm.
  Other differences:
    a) If axis is `None`, treats the flattened `tensor` as a vector
     regardless of rank.
    b) Explicitly supports 'euclidean' norm as the default, including for
     higher order tensors.
  @end_compatibility
  """
  keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims,
                                                    'keep_dims', keep_dims)
  if keepdims is None:
    keepdims = False

  is_matrix_norm = ((isinstance(axis, tuple) or isinstance(axis, list)) and
                    len(axis) == 2)
  if is_matrix_norm:
    axis = tuple(axis)
    if (not isinstance(axis[0], int) or not isinstance(axis[1], int) or
        axis[0] == axis[1]):
      raise ValueError(
          "'axis' must be None, an integer, or a tuple of 2 unique integers")
    supported_matrix_norms = ['euclidean', 'fro', 1, 2, np.inf]
    if ord not in supported_matrix_norms:
      raise ValueError("'ord' must be a supported matrix norm in %s, got %s" %
                       (supported_matrix_norms, ord))
  else:
    if not (isinstance(axis, int) or axis is None):
      raise ValueError(
          "'axis' must be None, an integer, or a tuple of 2 unique integers")

    supported_vector_norms = ['euclidean', 1, 2, np.inf]
    if (not np.isreal(ord) or ord <= 0) and ord not in supported_vector_norms:
      raise ValueError("'ord' must be a supported vector norm, got %s" % ord)
    if axis is not None:
      axis = (axis,)

  with ops.name_scope(name, 'norm', [tensor]):
    tensor = ops.convert_to_tensor(tensor)

    if ord in ['fro', 'euclidean', 2, 2.0]:
      if is_matrix_norm and ord in [2, 2.0]:
        rank = array_ops.rank(tensor)
        positive_axis = map_fn.map_fn(
            lambda i: control_flow_ops.cond(i >= 0, lambda: i, lambda: i + rank),
            ops.convert_to_tensor(axis))
        axes = math_ops.range(rank)
        perm_before = array_ops.concat(
            [array_ops.setdiff1d(axes, positive_axis)[0], positive_axis],
            axis=0)
        perm_after = map_fn.map_fn(
            lambda i: math_ops.cast(
                array_ops.squeeze(
                    array_ops.where_v2(math_ops.equal(perm_before, i))),
                dtype=dtypes.int32), axes)
        permed = array_ops.transpose(tensor, perm=perm_before)
        matrix_2_norm = array_ops.expand_dims(
            math_ops.reduce_max(
                math_ops.abs(gen_linalg_ops.svd(permed, compute_uv=False)[0]),
                axis=-1,
                keepdims=True),
            axis=-1)
        result = array_ops.transpose(matrix_2_norm, perm=perm_after)
      else:
        result = math_ops.sqrt(
            math_ops.reduce_sum(
                tensor * math_ops.conj(tensor), axis, keepdims=True))
        # TODO(rmlarsen): Replace with the following, once gradients are defined
        # result = math_ops.reduce_euclidean_norm(tensor, axis, keepdims=True)
    else:
      result = math_ops.abs(tensor)
      if ord == 1:
        sum_axis = None if axis is None else axis[0]
        result = math_ops.reduce_sum(result, sum_axis, keepdims=True)
        if is_matrix_norm:
          result = math_ops.reduce_max(result, axis[-1], keepdims=True)
      elif ord == np.inf:
        if is_matrix_norm:
          result = math_ops.reduce_sum(result, axis[1], keepdims=True)
        max_axis = None if axis is None else axis[0]
        result = math_ops.reduce_max(result, max_axis, keepdims=True)
      else:
        # General p-norms (positive p only)
        result = math_ops.pow(
            math_ops.reduce_sum(math_ops.pow(result, ord), axis, keepdims=True),
            1.0 / ord)
    if not keepdims:
      result = array_ops.squeeze(result, axis)
    return result
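A quick usage sketch: this function is exposed publicly as `tf.norm`, and the values in the comments are exact for this input:

import numpy as np
import tensorflow as tf

m = tf.constant([[3.0, 4.0], [0.0, 12.0]])
print(tf.norm(m))                             # Frobenius norm: 13.0
print(tf.norm(m, ord=1, axis=1))              # per-row 1-norms: [7., 12.]
print(tf.norm(m, ord=np.inf, axis=[-2, -1]))  # matrix inf-norm (max row sum): 12.0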
Example #11
  def testCtcLossDenseUniqueFastPathWithBlankIndexIsSameAsCtcLoss(self):
    random_seed.set_random_seed(5)

    batch_size = 8
    num_labels = 6
    label_length = 5
    num_frames = 12
    logits = random_ops.random_uniform([num_frames, batch_size, num_labels])
    labels = random_ops.random_uniform([batch_size, label_length],
                                       minval=0,
                                       maxval=num_labels - 1,
                                       dtype=dtypes.int64)

    label_lengths = random_ops.random_uniform([batch_size],
                                              minval=2,
                                              maxval=label_length,
                                              dtype=dtypes.int64)
    label_mask = array_ops.sequence_mask(
        label_lengths, maxlen=label_length, dtype=label_lengths.dtype)
    labels *= label_mask

    logit_lengths = [num_frames] * batch_size

    tf_ctc_loss_labels = math_ops.cast(labels, dtypes.int32)
    tf_ctc_loss_labels = ctc_ops.dense_labels_to_sparse(tf_ctc_loss_labels,
                                                        label_lengths)

    tf_nn_ctc_loss = ctc_ops.ctc_loss(
        labels=tf_ctc_loss_labels,
        inputs=logits,
        sequence_length=logit_lengths,
        time_major=True)
    tf_nn_ctc_grads = gradients_impl.gradients(tf_nn_ctc_loss, [logits])[0]

    # Shift the blank logits/labels to be somewhere in the middle.
    blank_index = 2
    shifted_logits = array_ops.concat([
        logits[:, :, :blank_index],
        logits[:, :, -1:],
        logits[:, :, blank_index:-1],
    ],
                                      axis=2)
    shifted_labels = array_ops.where_v2(labels < blank_index, labels,
                                        labels + 1)

    ctc_loss = ctc_ops.ctc_loss_dense(
        labels=shifted_labels,
        logits=shifted_logits,
        label_length=label_lengths,
        logit_length=logit_lengths,
        blank_index=blank_index,
        unique=ctc_ops.ctc_unique_labels(shifted_labels))
    ctc_loss_grads = gradients_impl.gradients(ctc_loss, [logits])[0]

    with self.cached_session() as sess:
      for _ in range(32):
        self.assertAllClose(*self.evaluate([ctc_loss, tf_nn_ctc_loss]))
        self.assertAllClose(
            *self.evaluate([ctc_loss_grads, tf_nn_ctc_grads]),
            rtol=2e-06,
            atol=2e-06)
Example #12
    def training_graph(self,
                       input_data,
                       input_labels,
                       num_trainers=1,
                       trainer_id=0,
                       **tree_kwargs):
        """Constructs a TF graph for training a random forest.

        Args:
          input_data: A tensor or dict of string->Tensor for input data.
          input_labels: A tensor or placeholder for labels associated with
            input_data.
          num_trainers: Number of parallel trainers to split trees among.
          trainer_id: Which trainer this instance is.
          **tree_kwargs: Keyword arguments passed to each tree's training_graph.

        Returns:
          The last op in the random forest training graph.

        Raises:
          NotImplementedError: If trying to use bagging with sparse features.
        """
        processed_dense_features, processed_sparse_features, data_spec = (
            data_ops.ParseDataTensorOrDict(input_data))

        if input_labels is not None:
            labels = data_ops.ParseLabelTensorOrDict(input_labels)

        data_spec = data_spec or self.get_default_data_spec(input_data)

        tree_graphs = []
        trees_per_trainer = self.params.num_trees / num_trainers
        tree_start = int(trainer_id * trees_per_trainer)
        tree_end = int((trainer_id + 1) * trees_per_trainer)
        for i in range(tree_start, tree_end):
            with ops.device(self.variables.device_dummies[i].device):
                seed = self.params.base_random_seed
                if seed != 0:
                    seed += i
                # If using bagging, randomly select some of the input.
                tree_data = processed_dense_features
                tree_labels = labels
                if self.params.bagging_fraction < 1.0:
                    # TODO(gilberth): Support bagging for sparse features.
                    if processed_sparse_features is not None:
                        raise NotImplementedError(
                            'Bagging not supported with sparse features.')
                    # TODO(thomaswc): This does sampling without replacement.  Consider
                    # also allowing sampling with replacement as an option.
                    batch_size = array_ops.strided_slice(
                        array_ops.shape(processed_dense_features), [0], [1])
                    r = random_ops.random_uniform(batch_size, seed=seed)
                    mask = math_ops.less(
                        r,
                        array_ops.ones_like(r) * self.params.bagging_fraction)
                    gather_indices = array_ops.squeeze(
                        array_ops.where_v2(mask), axis=[1])
                    # TODO(thomaswc): Calculate out-of-bag data and labels, and store
                    # them for use in calculating statistics later.
                    tree_data = array_ops.gather(processed_dense_features,
                                                 gather_indices)
                    tree_labels = array_ops.gather(labels, gather_indices)
                if self.params.bagged_features:
                    if processed_sparse_features is not None:
                        raise NotImplementedError(
                            'Feature bagging not supported with sparse features.'
                        )
                    tree_data = self._bag_features(i, tree_data)

                tree_graphs.append(self.trees[i].training_graph(
                    tree_data,
                    tree_labels,
                    seed,
                    data_spec=data_spec,
                    sparse_features=processed_sparse_features,
                    **tree_kwargs))

        return control_flow_ops.group(*tree_graphs, name='train')
Example #13
def pinv(a, rcond=None, validate_args=False, name=None):
    """Compute the Moore-Penrose pseudo-inverse of one or more matrices.

  Calculate the [generalized inverse of a matrix](
  https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its
  singular-value decomposition (SVD) and including all large singular values.

  The pseudo-inverse of a matrix `A` is defined as "the matrix that 'solves'
  [the least-squares problem] `A @ x = b`", i.e., if `x_hat` is a solution,
  then `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown
  that if `U @ Sigma @ V.T = A` is the singular value decomposition of `A`,
  then `A_pinv = V @ inv(Sigma) @ U^T`. [(Strang, 1980)][1]

  This function is analogous to [`numpy.linalg.pinv`](
  https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html).
  It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the
  default `rcond` is `1e-15`. Here the default is
  `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`.

  Args:
    a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
      pseudo-inverted.
    rcond: `Tensor` of small singular value cutoffs.  Singular values smaller
      (in modulus) than `rcond` * largest_singular_value (again, in modulus) are
      set to zero. Must broadcast against `tf.shape(a)[:-2]`.
      Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`.
    validate_args: When `True`, additional assertions might be embedded in the
      graph.
      Default value: `False` (i.e., no graph assertions are added).
    name: Python `str` prefixed to ops created by this function.
      Default value: 'pinv'.

  Returns:
    a_pinv: (Batch of) pseudo-inverse of input `a`. Has same shape as `a` except
      rightmost two dimensions are transposed.

  Raises:
    TypeError: if input `a` does not have `float`-like `dtype`.
    ValueError: if input `a` has fewer than 2 dimensions.

  #### Examples

  ```python
  import tensorflow as tf
  import tensorflow_probability as tfp

  a = tf.constant([[1.,  0.4,  0.5],
                   [0.4, 0.2,  0.25],
                   [0.5, 0.25, 0.35]])
  tf.matmul(tf.linalg.pinv(a), a)
  # ==> array([[1., 0., 0.],
               [0., 1., 0.],
               [0., 0., 1.]], dtype=float32)

  a = tf.constant([[1.,  0.4,  0.5,  1.],
                   [0.4, 0.2,  0.25, 2.],
                   [0.5, 0.25, 0.35, 3.]])
  tf.matmul(tf.linalg.pinv(a), a)
  # ==> array([[ 0.76,  0.37,  0.21, -0.02],
               [ 0.37,  0.43, -0.33,  0.02],
               [ 0.21, -0.33,  0.81,  0.01],
               [-0.02,  0.02,  0.01,  1.  ]], dtype=float32)
  ```

  #### References

  [1]: G. Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic Press,
       Inc., 1980, pp. 139-142.
  """
    with ops.name_scope(name or 'pinv'):
        a = ops.convert_to_tensor(a, name='a')

        assertions = _maybe_validate_matrix(a, validate_args)
        if assertions:
            with ops.control_dependencies(assertions):
                a = array_ops.identity(a)

        dtype = a.dtype.as_numpy_dtype

        if rcond is None:

            def get_dim_size(dim):
                dim_val = tensor_shape.dimension_value(a.shape[dim])
                if dim_val is not None:
                    return dim_val
                return array_ops.shape(a)[dim]

            num_rows = get_dim_size(-2)
            num_cols = get_dim_size(-1)
            if isinstance(num_rows, int) and isinstance(num_cols, int):
                max_rows_cols = float(max(num_rows, num_cols))
            else:
                max_rows_cols = math_ops.cast(
                    math_ops.maximum(num_rows, num_cols), dtype)
            rcond = 10. * max_rows_cols * np.finfo(dtype).eps

        rcond = ops.convert_to_tensor(rcond, dtype=dtype, name='rcond')

        # Calculate pseudo inverse via SVD.
        # Note: if a is Hermitian then u == v. (We might observe additional
        # performance by explicitly setting `v = u` in such cases.)
        [
            singular_values,  # Sigma
            left_singular_vectors,  # U
            right_singular_vectors,  # V
        ] = svd(a, full_matrices=False, compute_uv=True)

        # Saturate small singular values to inf. This has the effect of making
        # `1. / s = 0.` while not resulting in `NaN` gradients.
        cutoff = rcond * math_ops.reduce_max(singular_values, axis=-1)
        singular_values = array_ops.where_v2(
            singular_values > array_ops.expand_dims_v2(cutoff, -1),
            singular_values, np.array(np.inf, dtype))

        # By the definition of the SVD, `a == u @ s @ v^H`, and the pseudo-inverse
        # is defined as `pinv(a) == v @ inv(s) @ u^H`.
        a_pinv = math_ops.matmul(right_singular_vectors /
                                 array_ops.expand_dims_v2(singular_values, -2),
                                 left_singular_vectors,
                                 adjoint_b=True)

        if a.shape is not None and a.shape.rank is not None:
            a_pinv.set_shape(a.shape[:-2].concatenate(
                [a.shape[-1], a.shape[-2]]))

        return a_pinv
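As a quick sanity check, the defining Moore-Penrose property `A @ pinv(A) @ A ≈ A` can be verified against the public `tf.linalg.pinv` (a sketch, not part of the snippet above):

import tensorflow as tf

a = tf.constant([[1., 0.4, 0.5, 1.],
                 [0.4, 0.2, 0.25, 2.],
                 [0.5, 0.25, 0.35, 3.]])
a_pinv = tf.linalg.pinv(a)
print(tf.reduce_max(tf.abs(a @ a_pinv @ a - a)))  # ~0 (first Penrose condition)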
Example #14
def _cdf(self, x):
    # Take abs(scale) so a negative scale cannot flip the tail selection below.
    y = (x - self.loc) / math_ops.abs(self.scale)
    x_t = self.df / (y**2. + self.df)
    # The regularized incomplete beta gives the lower-tail mass for y < 0;
    # mirror it for y >= 0.
    neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
    return array_ops.where_v2(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf)
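This is the Student's t CDF expressed through the regularized incomplete beta function; a hedged standalone sketch with plain tensors in place of the distribution object above:

import tensorflow as tf

def student_t_cdf(x, df, loc, scale):
    y = (x - loc) / tf.abs(scale)  # abs keeps the tail test below valid
    x_t = df / (y**2. + df)
    neg_cdf = 0.5 * tf.math.betainc(0.5 * df, 0.5, x_t)
    return tf.where(y < 0., neg_cdf, 1. - neg_cdf)

# By symmetry the CDF at x == loc is 0.5.
print(student_t_cdf(tf.constant(0.), df=3., loc=0., scale=1.))  # 0.5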