Example 1
 def testArgsortStable(self):
     arr = constant_op.constant([1, 5, 2, 2, 3], dtype=dtypes.int32)
     ascending = [0, 2, 3, 4, 1]
     descending = [1, 4, 2, 3, 0]
     with self.cached_session():
         self.assertAllEqual(
             sort_ops.argsort(arr, direction='ASCENDING', stable=True),
             ascending)
         self.assertAllEqual(
             sort_ops.argsort(arr, direction='DESCENDING', stable=True),
             descending)
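As a standalone check of the same behavior through the public `tf.argsort` API (a minimal eager-mode sketch, assuming TF 2.x; input values taken from the test above):

  import tensorflow as tf

  x = tf.constant([1, 5, 2, 2, 3])
  # With stable=True the duplicate 2s (indices 2 and 3) keep their
  # original relative order in the result.
  print(tf.argsort(x, direction='ASCENDING', stable=True).numpy())  # [0 2 3 4 1]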
Example 2
 def testArgsort(self):
   arr = np.random.random((5, 6, 7, 8))
   for axis in range(4):
     with self.cached_session():
       self.assertAllEqual(
           np.argsort(arr, axis=axis),
           sort_ops.argsort(arr, axis=axis).eval())
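The test uses the TF1-style `cached_session()`/`.eval()` idiom; a hedged eager-mode equivalent for TF 2.x looks like this:

  import numpy as np
  import tensorflow as tf

  arr = np.random.random((5, 6, 7, 8))
  for axis in range(4):
    # tf.argsort matches np.argsort along every axis.
    np.testing.assert_array_equal(np.argsort(arr, axis=axis),
                                  tf.argsort(arr, axis=axis).numpy())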
Example 3
 def testArgsortTensorShape(self):
     with ops.Graph().as_default():
         placeholder = array_ops.placeholder(dtypes.float32,
                                             shape=[1, None, 5])
         for axis in range(3):
             with self.cached_session():
                 self.assertAllEqual(
                     placeholder.shape.as_list(),
                     sort_ops.argsort(placeholder,
                                      axis=axis).shape.as_list())
Example 4
    def sample(self, time, outputs, state, name=None):
        """sample for SampleEmbeddingHelper."""
        del time, state  # unused by sample_fn
        # Outputs are logits, we sample instead of argmax (greedy).
        if not isinstance(outputs, ops.Tensor):
            raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                            type(outputs))

        probs = nn_ops.softmax(outputs, -1)
        sorted_args = sort_ops.argsort(probs, -1, direction='DESCENDING')
        sorted_nucleus_probs = math_ops.cumsum(
            sort_ops.sort(probs, -1, direction='DESCENDING'),
            -1) < self._nucleus
        nucleus_probs = array_ops.gather(sorted_nucleus_probs,
                                         sort_ops.argsort(
                                             sorted_args,
                                             -1,
                                             direction='ASCENDING'),
                                         batch_dims=1)
        argmax_probs = array_ops.one_hot(math_ops.argmax(outputs, -1),
                                         depth=array_ops.shape(outputs)[-1],
                                         on_value=True,
                                         off_value=False,
                                         dtype=dtypes.bool)
        outputs = array_ops.where(
            (nucleus_probs | argmax_probs), outputs,
            -np.inf * array_ops.ones_like(outputs, dtype=dtypes.float32))

        if self._softmax_temperature is None:
            logits = outputs
        else:
            logits = outputs / self._softmax_temperature

        sample_ids = categorical_sample(logits=logits, seed=self._seed)

        return sample_ids
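The `argsort(argsort(...))` composition above is the standard trick for inverting a sorting permutation: the nucleus mask is computed in sorted order and then mapped back to the original positions. A small NumPy illustration with made-up values:

  import numpy as np

  probs = np.array([0.1, 0.5, 0.2, 0.2])
  order = np.argsort(-probs)                # descending sort permutation
  inverse = np.argsort(order)               # inverse of that permutation
  in_nucleus_sorted = np.cumsum(probs[order]) < 0.6
  print(in_nucleus_sorted[inverse])         # mask back in original order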
Example 5
      def Compute(x):
        e, v = linalg_ops.eig(x)

        # We sort the eigenvalues by e.real + e.imag to get a consistent
        # order between runs.
        b_dims = len(e.shape) - 1
        idx = sort_ops.argsort(math_ops.real(e) + math_ops.imag(e), axis=-1)
        e = array_ops.gather(e, idx, batch_dims=b_dims)
        v = array_ops.gather(v, idx, batch_dims=b_dims)

        # Complex eigenvectors are only unique up to an arbitrary phase.
        # We normalize the vectors such that the first component has phase 0.
        top_rows = v[..., 0:1, :]
        angle = -math_ops.angle(top_rows)
        phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
        v *= phase
        return e, v
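A single-matrix NumPy analogue of the sort-then-gather step may make the intent clearer (a sketch only; in NumPy the eigenvectors are the columns of `v`, and there are no batch dimensions to handle):

  import numpy as np

  x = np.random.randn(4, 4)
  e, v = np.linalg.eig(x)
  idx = np.argsort(e.real + e.imag)  # deterministic ordering key
  e, v = e[idx], v[:, idx]           # reorder eigenpairs consistently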
Example 6
def merge_summaries(prev_summary, next_summary, epsilon):
  """Weighted merge sort of summaries.

  Given two summaries of distinct data, this function merges (and compresses)
  them to stay within `epsilon` error tolerance.

  Args:
      prev_summary: 2-D `np.ndarray` summary to be merged with `next_summary`.
      next_summary: 2-D `np.ndarray` summary to be merged with `prev_summary`.
      epsilon: A float that determines the desired approximate precision.

  Returns:
      A 2-D `np.ndarray` that is a merged summary. The first column holds the
      interpolated partition values, the second the weights (counts).
  """
  merged = array_ops.concat((prev_summary, next_summary), axis=1)
  merged = array_ops.gather_v2(merged, sort_ops.argsort(merged[0]), axis=1)
  return compress(merged, epsilon)
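The sort-by-first-row merge is easy to see with concrete numbers. A NumPy sketch with made-up summaries (`compress` is the module's own follow-up step and is omitted here):

  import numpy as np

  prev_summary = np.array([[0.0, 2.0, 5.0],   # row 0: partition values
                           [3.0, 4.0, 2.0]])  # row 1: weights (counts)
  next_summary = np.array([[1.0, 3.0],
                           [5.0, 1.0]])
  merged = np.concatenate((prev_summary, next_summary), axis=1)
  merged = merged[:, np.argsort(merged[0])]   # reorder columns by value
  print(merged[0])  # [0. 1. 2. 3. 5.]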
Example 7
    def _get_vocab_and_ids(self):
        export = getattr(self._vocab_lookup_table, 'export', None)
        if export is None:
            table = getattr(self._vocab_lookup_table, '_table')
            export = table.export

        vocab, ids = export()  # pylint: disable=protected-access

        # `.export` doesn't set the shapes.
        vocab = check_ops.ensure_shape(vocab, [
            None,
        ])
        ids = check_ops.ensure_shape(ids, [
            None,
        ])

        order = sort_ops.argsort(ids)

        ids = array_ops.gather(ids, order)
        vocab = array_ops.gather(vocab, order)

        return vocab, ids
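The argsort-then-gather pair keeps `vocab` and `ids` aligned while putting both into id order. A minimal sketch with made-up values, using the public API:

  import tensorflow as tf

  vocab = tf.constant(['c', 'a', 'b'])
  ids = tf.constant([2, 0, 1])
  order = tf.argsort(ids)
  print(tf.gather(vocab, order).numpy())  # [b'a' b'b' b'c']
  print(tf.gather(ids, order).numpy())    # [0 1 2]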
Example 8
def _tf_sorted(iterable, key, reverse):
  """Overload of sorted_ for Tensor iterable."""
  if reverse is UNSPECIFIED:
    direction = 'ASCENDING'
  else:
    direction = 'DESCENDING'
  if key is not UNSPECIFIED:
    mapped = parallel_ops.vectorized_map(key, iterable)
    if mapped.shape.rank is not None and mapped.shape.rank != 1:
      raise ValueError('sort only supports 1D tensors')
    with ops.control_dependencies([
        check_ops.assert_rank_v2(mapped, 1,
                                 'sort only supports 1D tensors')
    ]):
      order = sort_ops.argsort(mapped, direction=direction)
      return array_ops.gather_v2(iterable, order)
  if iterable.shape.rank is not None and iterable.shape.rank != 1:
    raise ValueError('sort only supports 1D tensors')
  with ops.control_dependencies([
      check_ops.assert_rank_v2(iterable, 1,
                               'sort only supports 1D tensors')
  ]):
    return sort_ops.sort(iterable, direction=direction)
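The key-handling branch mirrors Python's `sorted(iterable, key=...)`: map the key over the elements, argsort the mapped values, then gather the originals. A hedged eager-mode sketch of the same pattern with public APIs:

  import tensorflow as tf

  x = tf.constant([3.0, 1.0, 2.0])
  keys = tf.vectorized_map(lambda t: -t, x)        # key applied per element
  order = tf.argsort(keys, direction='ASCENDING')
  print(tf.gather(x, order).numpy())               # [3. 2. 1.]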
Example 9
    def _matmul(self, x, adjoint=False, adjoint_arg=False):
        perm = ops.convert_to_tensor_v2_with_dispatch(self.perm)
        if adjoint and not self.is_self_adjoint:
            # TODO(srvasude): invert_permutation doesn't work on batches so we use
            # argsort.
            perm = sort_ops.argsort(perm, axis=-1)
        x = linalg.adjoint(x) if adjoint_arg else x

        # We need to broadcast x and the permutation since tf.gather doesn't
        # broadcast.
        broadcast_shape = array_ops.broadcast_dynamic_shape(
            array_ops.shape(x)[:-1], array_ops.shape(perm))
        k = array_ops.shape(x)[-1]
        broadcast_x_shape = array_ops.concat([broadcast_shape, [k]], axis=-1)
        x = array_ops.broadcast_to(x, broadcast_x_shape)
        perm = array_ops.broadcast_to(perm, broadcast_shape)

        m = array_ops.shape(x)[-2]
        x = array_ops.reshape(x, [-1, m, k])
        perm = array_ops.reshape(perm, [-1, m])

        y = array_ops.gather(x, perm, axis=-2, batch_dims=1)
        return array_ops.reshape(y, broadcast_x_shape)
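The workaround in the TODO relies on the fact that the argsort of a permutation vector is its inverse, and, unlike `tf.math.invert_permutation`, argsort applies along the last axis of a batch. A quick NumPy check:

  import numpy as np

  perm = np.array([2, 0, 3, 1])
  inv = np.argsort(perm)
  print(inv)                   # [1 3 0 2]
  print(perm[inv], inv[perm])  # both recover [0 1 2 3]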
Example 10
def _lovasz_jaccard_flat(errors, y_true):
    '''PRIVATE: calculate the Lovász extension of the Jaccard index along a vector.
    Input:
        errors: error vector (values should be in [0, 1]).
        y_true: labels.
    Output:
        scalar: the Lovász-Jaccard loss calculated on the input vector.
    '''
    p = errors.get_shape().as_list()
    if len(p) != 1:
        raise ValueError('Input should be vectors (1D).')
    p = p[0]
    bin_y_true = math_ops.cast(gen_math_ops.greater(y_true, 0.5),
                               dtype=errors.dtype)
    error_ind = sort_ops.argsort(errors, direction='DESCENDING')
    sorted_errors = array_ops.gather(errors, error_ind)
    sorted_labels = array_ops.gather(bin_y_true, error_ind)
    get_sum = math_ops.reduce_sum(sorted_labels)
    intersection = get_sum - math_ops.cumsum(sorted_labels)
    union = get_sum + math_ops.cumsum(1.0 - sorted_labels)
    g = 1.0 - math_ops.div_no_nan(intersection, union)
    if p > 1:
        g = array_ops.concat((g[0:1], g[1:] - g[:-1]), axis=0)
    return math_ops.reduce_sum(sorted_errors * gen_array_ops.stop_gradient(g))
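A hedged NumPy transcription of the same computation, handy for checking the op-by-op logic on a toy input (plain division stands in for `div_no_nan`, which guards against a zero denominator):

  import numpy as np

  errors = np.array([0.9, 0.1, 0.4])
  labels = np.array([1.0, 0.0, 1.0])
  ind = np.argsort(-errors)                    # descending by error
  errs, labs = errors[ind], labels[ind]
  total = labs.sum()
  inter = total - np.cumsum(labs)
  union = total + np.cumsum(1.0 - labs)
  g = 1.0 - inter / union
  g = np.concatenate((g[:1], g[1:] - g[:-1]))  # discrete gradient
  print((errs * g).sum())                      # 0.65 for this input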
Example 11
def stack_dynamic_partitions(data, partitions, num_partitions, name=None):
  """Stacks dynamic partitions of a Tensor or RaggedTensor.

  Returns a RaggedTensor `output` with `num_partitions` rows, where the row
  `output[i]` is formed by stacking all slices `data[j1...jN]` such that
  `partitions[j1...jN] = i`.  Slices of `data` are stacked in row-major
  order.

  If `num_partitions` is an `int` (not a `Tensor`), then this is equivalent to
  `tf.ragged.stack(tf.dynamic_partition(data, partitions, num_partitions))`.

  #### Example:

  >>> data           = ['a', 'b', 'c', 'd', 'e']
  >>> partitions     = [  3,   0,   2,   2,   3]
  >>> num_partitions = 5
  >>> tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions)
  <tf.RaggedTensor [[b'b'], [], [b'c', b'd'], [b'a', b'e'], []]>

  Args:
    data: A `Tensor` or `RaggedTensor` containing the values to stack.
    partitions: An `int32` or `int64` `Tensor` or `RaggedTensor` specifying the
      partition that each slice of `data` should be added to. `partitions.shape`
      must be a prefix of `data.shape`.  Values must be greater than or equal to
      zero, and less than `num_partitions`. `partitions` is not required to be
      sorted.
    num_partitions: An `int32` or `int64` scalar specifying the number of
      partitions to output.  This determines the number of rows in `output`.
    name: A name prefix for the returned tensor (optional).

  Returns:
    A `RaggedTensor` containing the stacked partitions.  The returned tensor
    has the same dtype as `data`, and its shape is
    `[num_partitions, (D)] + data.shape[partitions.rank:]`, where `(D)` is a
    ragged dimension whose length is the number of data slices stacked for
    each `partition`.
  """
  with ops.name_scope(name, 'SegmentStack', [data, partitions, num_partitions]):
    # Convert inputs to tensors.
    data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
    row_splits_dtype = (
        data.row_splits.dtype
        if isinstance(data, ragged_tensor.RaggedTensor) else None)
    partitions = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        partitions, name='partitions', preferred_dtype=row_splits_dtype)
    num_partitions = ops.convert_to_tensor(
        num_partitions, name='num_partitions', preferred_dtype=partitions.dtype)
    if row_splits_dtype is not None:
      partitions = math_ops.cast(partitions, row_splits_dtype)
    num_partitions = math_ops.cast(num_partitions, partitions.dtype)

    # Sanity-checks for shapes.
    partitions_rank = partitions.shape.ndims
    if partitions_rank is None:
      raise ValueError('partitions must have known rank.')
    num_partitions.shape.assert_has_rank(0)
    partitions.shape.assert_is_compatible_with(data.shape[:partitions_rank])

    if partitions_rank == 0:
      # If partitions is a scalar, then just create a RaggedTensor containing
      # the complete `data` value as a single slice in the specified row.
      return ragged_tensor.RaggedTensor.from_value_rowids(
          values=array_ops.stack([data]),
          value_rowids=array_ops.stack([partitions]),
          nrows=num_partitions,
          validate=False)

    elif partitions_rank == 1:
      # If partitions is a vector (the typical case): we can just use data and
      # partitions as the `values` and `value_rowids` for `from_value_rowids`,
      # as long as we sort them first.
      permutation = sort_ops.argsort(partitions, stable=True)
      value_rowids = array_ops.gather(partitions, permutation)
      values = array_ops.gather(data, permutation)
      check = check_ops.assert_less(
          value_rowids[-1:],
          num_partitions,
          message='partitions must be less than num_partitions')
      with ops.control_dependencies([check]):
        return ragged_tensor.RaggedTensor.from_value_rowids(
            values, value_rowids, nrows=num_partitions, validate=False)

    else:
      # Handle higher-dimensional partitions via recursion.
      if not isinstance(data, ragged_tensor.RaggedTensor):
        data = ragged_tensor.RaggedTensor.from_tensor(
            data, row_splits_dtype=partitions.dtype, ragged_rank=1)
      if not isinstance(partitions, ragged_tensor.RaggedTensor):
        partitions = ragged_tensor.RaggedTensor.from_tensor(
            partitions,
            row_splits_dtype=partitions.dtype,
            ragged_rank=max(data.ragged_rank, partitions_rank - 1))
      check = check_ops.assert_equal(
          data.row_splits,
          partitions.row_splits,
          message='data and partitions have incompatible ragged shapes')
      with ops.control_dependencies([check]):
        return stack_dynamic_partitions(data.values, partitions.values,
                                        num_partitions)
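The rank-1 branch is the heart of the function: a stable argsort groups equal partition ids while preserving row-major order, after which `from_value_rowids` builds the ragged result directly. A standalone sketch of that branch with the public API, reusing the docstring's example data:

  import tensorflow as tf

  data = tf.constant(['a', 'b', 'c', 'd', 'e'])
  partitions = tf.constant([3, 0, 2, 2, 3])
  perm = tf.argsort(partitions, stable=True)   # [1 2 3 0 4]
  rt = tf.RaggedTensor.from_value_rowids(
      values=tf.gather(data, perm),
      value_rowids=tf.gather(partitions, perm),
      nrows=5)
  print(rt)  # <tf.RaggedTensor [[b'b'], [], [b'c', b'd'], [b'a', b'e'], []]>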
Example 12
    def _argsort(a, axis, stable):
        if axis is None:
            a = array_ops.reshape(a, [-1])
            axis = 0

        return sort_ops.argsort(a, axis, stable=stable)
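This shim reproduces NumPy's `axis=None` convention, where the array is sorted as if flattened. A quick comparison (made-up input):

  import numpy as np
  import tensorflow as tf

  a = np.arange(6, dtype=np.float32).reshape(2, 3)[::-1]  # [[3 4 5], [0 1 2]]
  print(np.argsort(a, axis=None))                 # [3 4 5 0 1 2]
  print(tf.argsort(tf.reshape(a, [-1])).numpy())  # [3 4 5 0 1 2]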
Example 13
 def testArgsort_1d(self):
     arr = np.random.random(42)
     with self.cached_session():
         self.assertAllEqual(
             np.sort(arr),
             array_ops.gather(arr, sort_ops.argsort(arr)).eval())
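The test leans on the identity that gathering by argsort indices reproduces the sorted values. The same check in eager TF 2.x:

  import tensorflow as tf

  x = tf.random.uniform([42])
  # Raises InvalidArgumentError if any element differs.
  tf.debugging.assert_equal(tf.gather(x, tf.argsort(x)), tf.sort(x))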