def dense_make_stats_update(is_active, are_buckets_ready, float_column,
                            quantile_buckets, example_partition_ids, gradients,
                            hessians, weights, empty_gradients, empty_hessians):
  """Updates the state for dense split handler."""
  empty_float = constant_op.constant_v1([], dtype=dtypes.float32)

  quantile_values, quantile_weights = control_flow_ops.cond(
      is_active[1],  # For the next layer, this handler is inactive.
      lambda: (float_column, weights),
      lambda: (empty_float, empty_float))

  def ready_inputs_fn():
    """Branch to execute when quantiles are ready."""
    quantized_feature = quantile_ops.quantiles([float_column], [],
                                               [quantile_buckets], [], [])
    quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
    quantized_feature = array_ops.squeeze(quantized_feature, axis=0)
    return (example_partition_ids, quantized_feature, gradients, hessians)

  def not_ready_inputs_fn():
    return (constant_op.constant_v1([], dtype=dtypes.int32),
            constant_op.constant_v1([[]], dtype=dtypes.int64, shape=[1, 2]),
            empty_gradients, empty_hessians)

  example_partition_ids, feature_ids, gradients, hessians = (
      control_flow_ops.cond(
          math_ops.logical_and(
              math_ops.logical_and(are_buckets_ready,
                                   array_ops.size(quantile_buckets) > 0),
              is_active[0]), ready_inputs_fn, not_ready_inputs_fn))
  return (quantile_values, quantile_weights, example_partition_ids, feature_ids,
          gradients, hessians)
Example #2
 def is_initialized(self, name=None):
   # We have to cast the self._index.values() to a `list` because when we
   # use `model_to_estimator` to run tf.keras models, self._index.values() is
   # of type `dict_values` and not `list`.
   values_list = list(self._index.values())
   result = values_list[0].is_initialized()
   # We iterate through the list of values except the last one to allow us to
   # name the final `logical_and` op the same name that is passed by the user
   # to the `is_initialized` op. For tower local variables, the
   # `is_initialized` op is a `logical_and` op.
   for v in values_list[1:-1]:
     result = math_ops.logical_and(result, v.is_initialized())
   result = math_ops.logical_and(result, values_list[-1].is_initialized(),
                                 name=name)
   return result
Example #3
 def maybe_update_masks():
   with ops.name_scope(self._spec.name):
     is_step_within_pruning_range = math_ops.logical_and(
         math_ops.greater_equal(self._global_step,
                                self._spec.begin_pruning_step),
         # If end_pruning_step is negative, keep pruning forever!
         math_ops.logical_or(
             math_ops.less_equal(self._global_step,
                                 self._spec.end_pruning_step),
             math_ops.less(self._spec.end_pruning_step, 0)))
     is_pruning_step = math_ops.less_equal(
         math_ops.add(self._last_update_step, self._spec.pruning_frequency),
         self._global_step)
     return math_ops.logical_and(is_step_within_pruning_range,
                                 is_pruning_step)
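For reference, the same schedule predicate can be written as a plain-Python sketch (should_prune is a hypothetical helper, not part of the pruning library; it only mirrors the graph ops built above):

def should_prune(global_step, begin_step, end_step, last_update_step, frequency):
    # A negative end_step means "keep pruning forever".
    within_range = global_step >= begin_step and (global_step <= end_step or end_step < 0)
    is_pruning_step = last_update_step + frequency <= global_step
    return within_range and is_pruning_step

# e.g. should_prune(100, begin_step=50, end_step=-1, last_update_step=90, frequency=10) -> True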
Example #4
def _filter_input(input_tensor, vocab_freq_table, vocab_min_count,
                  vocab_subsampling, corpus_size, seed):
  """Filters input tensor based on vocab freq, threshold, and subsampling."""
  if vocab_freq_table is None:
    return input_tensor

  if not isinstance(vocab_freq_table, lookup.InitializableLookupTableBase):
    raise ValueError(
        "vocab_freq_table must be a subclass of "
        "InitializableLookupTableBase (such as HashTable) instead of type "
        "{}.".format(type(vocab_freq_table)))

  with ops.name_scope(
      "filter_vocab", values=[vocab_freq_table, input_tensor, vocab_min_count]):
    freq = vocab_freq_table.lookup(input_tensor)
    # Filters out elements in input_tensor that are not found in
    # vocab_freq_table (table returns a default value of -1 specified above when
    # an element is not found).
    mask = math_ops.not_equal(freq, vocab_freq_table.default_value)

    # Filters out elements whose vocab frequencies are less than the threshold.
    if vocab_min_count is not None:
      cast_threshold = math_ops.cast(vocab_min_count, freq.dtype)
      mask = math_ops.logical_and(mask,
                                  math_ops.greater_equal(freq, cast_threshold))

    input_tensor = array_ops.boolean_mask(input_tensor, mask)
    freq = array_ops.boolean_mask(freq, mask)

  if not vocab_subsampling:
    return input_tensor

  if vocab_subsampling < 0 or vocab_subsampling > 1:
    raise ValueError(
        "Invalid vocab_subsampling={} - it should be within range [0, 1].".
        format(vocab_subsampling))

  # Subsamples the input tokens based on vocabulary frequency and
  # vocab_subsampling threshold (ie randomly discard commonly appearing
  # tokens).
  with ops.name_scope(
      "subsample_vocab", values=[input_tensor, freq, vocab_subsampling]):
    corpus_size = math_ops.cast(corpus_size, dtypes.float64)
    freq = math_ops.cast(freq, dtypes.float64)
    vocab_subsampling = math_ops.cast(vocab_subsampling, dtypes.float64)

    # From tensorflow_models/tutorials/embedding/word2vec_kernels.cc, which is
    # supposed to correlate with Eq. 5 in http://arxiv.org/abs/1310.4546.
    keep_prob = ((math_ops.sqrt(freq /
                                (vocab_subsampling * corpus_size)) + 1.0) *
                 (vocab_subsampling * corpus_size / freq))
    random_prob = random_ops.random_uniform(
        array_ops.shape(freq),
        minval=0,
        maxval=1,
        dtype=dtypes.float64,
        seed=seed)

    mask = math_ops.less_equal(random_prob, keep_prob)
    return array_ops.boolean_mask(input_tensor, mask)
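The keep probability above follows the word2vec subsampling rule; a standalone NumPy sketch of the same computation (subsample_mask is a hypothetical helper, assuming raw token counts in freq and a threshold t such as 1e-3):

import numpy as np

def subsample_mask(freq, corpus_size, t, seed=0):
    rng = np.random.default_rng(seed)
    freq = np.asarray(freq, dtype=np.float64)
    keep_prob = (np.sqrt(freq / (t * corpus_size)) + 1.0) * (t * corpus_size / freq)
    return rng.uniform(size=freq.shape) <= keep_prob  # True = keep this token

# Frequent tokens get keep_prob < 1 and are randomly discarded; rare tokens get
# keep_prob >= 1 and are always kept.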
Example #5
  def mode(self, name="mode"):
    """Mode of the distribution.

    Note that the mode for the Beta distribution is only defined
    when `a > 1`, `b > 1`. This returns the mode when `a > 1` and `b > 1`,
    and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
    will be raised rather than returning `NaN`.

    Args:
      name: The name for this op.

    Returns:
      Mode of the Beta distribution.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._a, self._b, self._a_b_sum], name):
        a = self._a
        b = self._b
        a_b_sum = self._a_b_sum
        one = constant_op.constant(1, self.dtype)
        mode = (a - 1)/ (a_b_sum - 2)

        if self.allow_nan_stats:
          return math_ops.select(
              math_ops.logical_and(
                  math_ops.greater(a, 1), math_ops.greater(b, 1)),
              mode,
              (constant_op.constant(float("NaN"), dtype=self.dtype) *
               array_ops.ones_like(a_b_sum, dtype=self.dtype)))
        else:
          return control_flow_ops.with_dependencies([
              check_ops.assert_less(one, a),
              check_ops.assert_less(one, b)], mode)
Example #6
File: beta.py  Project: caisq/tensorflow
 def _mode(self):
     mode = (self.a - 1.0) / (self.a_b_sum - 2.0)
     if self.allow_nan_stats:
         nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
         return math_ops.select(
             math_ops.logical_and(math_ops.greater(self.a, 1.0), math_ops.greater(self.b, 1.0)),
             mode,
             array_ops.fill(self.batch_shape(), nan, name="nan"),
         )
     else:
         return control_flow_ops.with_dependencies(
             [
                 check_ops.assert_less(
                     array_ops.ones((), dtype=self.dtype),
                     self.a,
                     message="Mode not defined for components of a <= 1.",
                 ),
                 check_ops.assert_less(
                     array_ops.ones((), dtype=self.dtype),
                     self.b,
                     message="Mode not defined for components of b <= 1.",
                 ),
             ],
             mode,
         )
Example #7
def get_seed(seed):
  """Returns the local seeds an operation should use given an op-specific seed.

  See `tf.compat.v1.get_seed` for more details. This wrapper adds support for
  the case where `seed` may be a tensor.

  Args:
    seed: An integer or a `tf.int64` scalar tensor.

  Returns:
    A tuple of two `tf.int64` scalar tensors that should be used for the local
    seed of the calling dataset.
  """
  seed, seed2 = random_seed.get_seed(seed)
  if seed is None:
    seed = constant_op.constant(0, dtype=dtypes.int64, name="seed")
  else:
    seed = ops.convert_to_tensor(seed, dtype=dtypes.int64, name="seed")
  if seed2 is None:
    seed2 = constant_op.constant(0, dtype=dtypes.int64, name="seed2")
  else:
    with ops.name_scope("seed2") as scope:
      seed2 = ops.convert_to_tensor(seed2, dtype=dtypes.int64)
      seed2 = array_ops.where(
          math_ops.logical_and(
              math_ops.equal(seed, 0), math_ops.equal(seed2, 0)),
          constant_op.constant(2**31 - 1, dtype=dtypes.int64),
          seed2,
          name=scope)
  return seed, seed2
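A rough pure-Python restatement of the seed resolution above (resolve_seeds is a hypothetical helper handling integer seeds only), shown to highlight the special case: when an explicit seed pair resolves to (0, 0), seed2 is replaced with 2**31 - 1, presumably so the downstream op does not treat (0, 0) as a request for non-deterministic seeding.

def resolve_seeds(seed, seed2):
    if seed is None:
        seed = 0
    if seed2 is None:
        seed2 = 0
    elif seed == 0 and seed2 == 0:
        # Explicit (0, 0) pair: substitute a fixed non-zero seed2.
        seed2 = 2**31 - 1
    return seed, seed2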
Example #8
    def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
      """Internal while_loop body.

      Args:
        time: scalar int32 tensor.
        outputs_ta: structure of TensorArray.
        state: (structure of) state tensors and TensorArrays.
        inputs: (structure of) input tensors.
        finished: bool tensor (keeping track of what's finished).
        sequence_lengths: int32 tensor (keeping track of time of finish).

      Returns:
        `(time + 1, outputs_ta, next_state, next_inputs, next_finished,
          next_sequence_lengths)`.
      """
      (next_outputs, decoder_state, next_inputs,
       decoder_finished) = decoder.step(time, inputs, state)
      next_finished = math_ops.logical_or(decoder_finished, finished)
      if maximum_iterations is not None:
        next_finished = math_ops.logical_or(
            next_finished, time + 1 >= maximum_iterations)
      next_sequence_lengths = array_ops.where(
          math_ops.logical_and(math_ops.logical_not(finished), next_finished),
          array_ops.fill(array_ops.shape(sequence_lengths), time + 1),
          sequence_lengths)

      nest.assert_same_structure(state, decoder_state)
      nest.assert_same_structure(outputs_ta, next_outputs)
      nest.assert_same_structure(inputs, next_inputs)

      # Zero out output values past finish
      if impute_finished:
        emit = nest.map_structure(
            lambda out, zero: array_ops.where(finished, zero, out),
            next_outputs,
            zero_outputs)
      else:
        emit = next_outputs

      # Copy through states past finish
      def _maybe_copy_state(new, cur):
        # TensorArrays and scalar states get passed through.
        if isinstance(cur, tensor_array_ops.TensorArray):
          pass_through = True
        else:
          new.set_shape(cur.shape)
          pass_through = (new.shape.ndims == 0)
        return new if pass_through else array_ops.where(finished, cur, new)

      if impute_finished:
        next_state = nest.map_structure(
            _maybe_copy_state, decoder_state, state)
      else:
        next_state = decoder_state

      outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
                                      outputs_ta, emit)
      return (time + 1, outputs_ta, next_state, next_inputs, next_finished,
              next_sequence_lengths)
Example #9
def collapse_repeated(labels, seq_length, name=None):
  """Merge repeated labels into single labels.

  Args:
    labels: Tensor of shape [batch, max value in seq_length]
    seq_length: Tensor of shape [batch], sequence length of each batch element.
    name: A name for this `Op`. Defaults to "collapse_repeated_labels".

  Returns:
    A tuple `(collapsed_labels, new_seq_length)` where

    collapsed_labels: Tensor of shape [batch, max_seq_length] with repeated
    labels collapsed and padded to max_seq_length, eg:
    `[[A, A, B, B, A], [A, B, C, D, E]] => [[A, B, A, 0, 0], [A, B, C, D, E]]`

    new_seq_length: int tensor of shape [batch] with new sequence lengths.
  """

  with ops.name_scope(name, "collapse_repeated_labels", [labels, seq_length]):
    labels = ops.convert_to_tensor(labels, name="labels")
    seq_length = ops.convert_to_tensor(seq_length, name="seq_length")

    # Mask labels that don't equal previous label.
    label_mask = array_ops.concat([
        array_ops.ones_like(labels[:, :1], dtypes.bool),
        math_ops.not_equal(labels[:, 1:], labels[:, :-1])
    ],
                                  axis=1)

    # Filter labels that aren't in the original sequence.
    maxlen = _get_dim(labels, 1)
    seq_mask = array_ops.sequence_mask(seq_length, maxlen=maxlen)
    label_mask = math_ops.logical_and(label_mask, seq_mask)

    # Count masks for new sequence lengths.
    new_seq_len = math_ops.reduce_sum(
        math_ops.cast(label_mask, dtypes.int32), axis=1)

    # Mask indexes based on sequence length mask.
    new_maxlen = math_ops.reduce_max(new_seq_len)
    idx_mask = array_ops.sequence_mask(new_seq_len, maxlen=new_maxlen)

    # Flatten everything and mask out labels to keep and sparse indices.
    flat_labels = array_ops.reshape(labels, [-1])
    flat_label_mask = array_ops.reshape(label_mask, [-1])
    flat_idx_mask = array_ops.reshape(idx_mask, [-1])
    idx = math_ops.range(_get_dim(flat_idx_mask, 0))

    # Scatter to flat shape.
    flat = array_ops.scatter_nd(
        indices=array_ops.expand_dims(
            array_ops.boolean_mask(idx, flat_idx_mask), axis=1),
        updates=array_ops.boolean_mask(flat_labels, flat_label_mask),
        shape=array_ops.shape(flat_idx_mask))

    # Reshape back to square batch.
    batch_size = _get_dim(labels, 0)
    new_shape = [batch_size, new_maxlen]
    return (array_ops.reshape(flat, new_shape),
            math_ops.cast(new_seq_len, seq_length.dtype))
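A minimal NumPy sketch of the same collapse for a single batch row (collapse_row is hypothetical and assumes seq_len >= 1; the TF version above does this batched, padded, and entirely with graph ops):

import numpy as np

def collapse_row(row, seq_len):
    row = np.asarray(row)[:seq_len]
    keep = np.concatenate(([True], row[1:] != row[:-1]))  # True where the label changes
    collapsed = row[keep]
    return collapsed, len(collapsed)

print(collapse_row([1, 1, 2, 2, 1], seq_len=5))  # (array([1, 2, 1]), 3)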
Example #10
def _prune_invalid_ids(sparse_ids, sparse_weights):
    """Prune invalid IDs (< 0) from the input ids and weights."""
    is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
    if sparse_weights is not None:
        is_id_valid = math_ops.logical_and(is_id_valid, math_ops.greater(sparse_weights.values, 0))
    sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
    if sparse_weights is not None:
        sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
    return sparse_ids, sparse_weights
Example #11
def _logical_and(*args):
  """Convenience function which attempts to statically `reduce_all`."""
  args_ = [_static_value(x) for x in args]
  if any(x is not None and not bool(x) for x in args_):
    return constant_op.constant(False)
  if all(x is not None and bool(x) for x in args_):
    return constant_op.constant(True)
  if len(args) == 2:
    return math_ops.logical_and(*args)
  return math_ops.reduce_all(args)
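A rough pure-Python analogue of the static short-circuiting above, where Python bools stand in for _static_value results and None marks a value only known at runtime:

def static_logical_and(*maybe_known):
    if any(v is not None and not v for v in maybe_known):
        return False   # a single statically-known False decides the result
    if all(v is not None and v for v in maybe_known):
        return True    # every input is statically known to be True
    return None        # undecidable statically; the real helper falls back to
                       # math_ops.logical_and / reduce_all here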
Example #12
  def undo_make_batch_of_event_sample_matrices(
      self, x, sample_shape, expand_batch_dim=True,
      name="undo_make_batch_of_event_sample_matrices"):
    """Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.

    Where:
      - `B_ = B if B or not expand_batch_dim else [1]`,
      - `E_ = E if E else [1]`,
      - `S_ = [tf.reduce_prod(S)]`.

    This function "reverses" `make_batch_of_event_sample_matrices`.

    Args:
      x: `Tensor` of shape `B_+E_+S_`.
      sample_shape: `Tensor` (1D, `int32`).
      expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
        such that `batch_ndims>=1`.
      name: Python `str`. The name to give this op.

    Returns:
      x: `Tensor`. Input transposed/reshaped to `S+B+E`.
    """
    with self._name_scope(name, values=[x, sample_shape]):
      x = ops.convert_to_tensor(x, name="x")
      # x.shape: _B+_E+[prod(S)]
      sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
      x = distribution_util.rotate_transpose(x, shift=1)
      # x.shape: [prod(S)]+_B+_E
      if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
        if self._batch_ndims_is_0 or self._event_ndims_is_0:
          squeeze_dims = []
          if self._event_ndims_is_0:
            squeeze_dims += [-1]
          if self._batch_ndims_is_0 and expand_batch_dim:
            squeeze_dims += [1]
          if squeeze_dims:
            x = array_ops.squeeze(x, axis=squeeze_dims)
            # x.shape: [prod(S)]+B+E
        _, batch_shape, event_shape = self.get_shape(x)
      else:
        s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
             else array_ops.shape(x))
        batch_shape = s[1:1+self.batch_ndims]
        # Since sample_dims=1 and is left-most, we add 1 to the number of
        # batch_ndims to get the event start dim.
        event_start = array_ops.where(
            math_ops.logical_and(expand_batch_dim, self._batch_ndims_is_0),
            2, 1 + self.batch_ndims)
        event_shape = s[event_start:event_start+self.event_ndims]
      new_shape = array_ops.concat([sample_shape, batch_shape, event_shape], 0)
      x = array_ops.reshape(x, shape=new_shape)
      # x.shape: S+B+E
      return x
Example #13
 def wrapped_cond(loop_counter, *args):
   # Convert the flow variables in `args` to TensorArrays. `args` should
   # already have the same structure as `orig_loop_vars` but currently there
   # is no nest.zip so we call `_pack_sequence_as` which flattens both
   # `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays
   # and packs it into the structure of `orig_loop_vars`.
   if maximum_iterations is None:
     return cond(*_pack_sequence_as(orig_loop_vars, args))
   else:
     return math_ops.logical_and(
         loop_counter < maximum_iterations,
         cond(*_pack_sequence_as(orig_loop_vars, args)))
Example #14
    def element_to_bucket_id(*args):
      """Return int64 id of the length bucket for this element."""
      seq_length = element_length_func(*args)

      boundaries = list(bucket_boundaries)
      buckets_min = [np.iinfo(np.int32).min] + boundaries
      buckets_max = boundaries + [np.iinfo(np.int32).max]
      conditions_c = math_ops.logical_and(
          math_ops.less_equal(buckets_min, seq_length),
          math_ops.less(seq_length, buckets_max))
      bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))

      return bucket_id
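A small NumPy sketch (not part of tf.data) of the same bucket lookup: a length falls into bucket i exactly when buckets_min[i] <= length < buckets_max[i].

import numpy as np

def bucket_id(seq_length, bucket_boundaries):
    boundaries = list(bucket_boundaries)
    buckets_min = [np.iinfo(np.int32).min] + boundaries
    buckets_max = boundaries + [np.iinfo(np.int32).max]
    conditions = np.logical_and(
        np.less_equal(buckets_min, seq_length),
        np.less(seq_length, buckets_max))
    return int(np.argmax(conditions))  # index of the single True entry

print(bucket_id(7, [5, 10, 20]))  # 1: lengths in [5, 10) land in bucket 1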
Example #15
 def _get_training_value(self, training=None):
   if training is None:
     training = K.learning_phase()
   if self._USE_V2_BEHAVIOR:
     if isinstance(training, int):
       training = bool(training)
     if base_layer_utils.is_in_keras_graph():
       training = math_ops.logical_and(training, self._get_trainable_var())
     elif not self.trainable:
       # When the layer is not trainable, it overrides the value passed from
       # model.
       training = self.trainable
   return training
Example #16
    def element_to_bucket_id(*args):
      """Return int64 id of the length bucket for this element."""
      seq_length = element_length_func(*args)

      boundaries = list(bucket_boundaries)
      buckets_min = [np.iinfo(np.int32).min] + boundaries
      buckets_max = boundaries + [np.iinfo(np.int32).max]
      conditions_c = math_ops.logical_and(
          math_ops.less_equal(buckets_min, seq_length),
          math_ops.less(seq_length, buckets_max))
      bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))

      return bucket_id
Example #17
 def fn_with_cond(*inner_args, **inner_kwds):
     """Conditionally runs initialization if it's needed."""
     condition = True
     for wr in self._created_variables:
         variable = wr()
         if variable is None:
             raise ValueError(
                 "A tf.Variable created inside your tf.function has been"
                 " garbage-collected. Your code needs to keep Python references"
                 " to variables created inside `tf.function`s.\n"
                 "\n"
                 "A common way to raise this error is to create and return a"
                 " variable only referenced inside your function:\n"
                 "\n"
                 "@tf.function\n"
                 "def f():\n"
                 "  v = tf.Variable(1.0)\n"
                 "  return v\n"
                 "\n"
                 "v = f()  # Crashes with this error message!\n"
                 "\n"
                 "The reason this crashes is that @tf.function annotated"
                 " function returns a **`tf.Tensor`** with the **value** of the"
                 " variable when the function is called rather than the"
                 " variable instance itself. As such there is no code holding a"
                 " reference to the `v` created inside the function and Python"
                 " garbage collects it.\n"
                 "\n"
                 "The simplest way to fix this issue is to create variables"
                 " outside the function and capture them:\n"
                 "\n"
                 "v = tf.Variable(1.0)\n"
                 "\n"
                 "@tf.function\n"
                 "def f():\n"
                 "  return v\n"
                 "\n"
                 "f()  # <tf.Tensor: ... numpy=1.>\n"
                 "v.assign_add(1.)\n"
                 "f()  # <tf.Tensor: ... numpy=2.>")
         condition = math_ops.logical_and(
             condition,
             resource_variable_ops.var_is_initialized_op(
                 variable.handle))
     # We want to call stateless_fn if possible because it avoids recomputing
     # potentially expensive initializers.
     return control_flow_ops.cond(
         condition,
         lambda: self._stateless_fn(*inner_args, **inner_kwds),
         _call_concrete(self._concrete_stateful_fn, inner_args,
                        inner_kwds))
Example #18
        def body(time, outputs_ta, state, inputs, history_masking, hit_ta, finished, sequence_lengths):

            (next_outputs, decoder_state, next_inputs, next_history_masking, next_hit, decoder_finished) = \
                decoder.step(time, inputs, state, history_masking)

            if decoder.tracks_own_finished:
                next_finished = decoder_finished
            else:
                next_finished = math_ops.logical_or(decoder_finished, finished)

            if maximum_iterations is not None:
                next_finished = math_ops.logical_or(next_finished, time + 1 >= maximum_iterations)

            next_sequence_lengths = array_ops.where(math_ops.logical_and(math_ops.logical_not(finished), next_finished),
                                                    array_ops.fill(array_ops.shape(sequence_lengths), time + 1),
                                                    sequence_lengths)

            nest.assert_same_structure(state, decoder_state)
            nest.assert_same_structure(outputs_ta, next_outputs)
            nest.assert_same_structure(inputs, next_inputs)
            nest.assert_same_structure(history_masking, next_history_masking)

            nest.assert_same_structure(hit_ta, next_hit)

            if impute_finished:
                emit = nest.map_structure(lambda out, zero: array_ops.where(finished, zero, out),
                                          next_outputs, zero_outputs)
            else:
                emit = next_outputs

            def _maybe_copy_state(new, cur):
                if isinstance(cur, tensor_array_ops.TensorArray):
                    pass_through = True
                else:
                    new.set_shape(cur.shape)
                    pass_through = (new.shape.ndims == 0)
                return new if pass_through else array_ops.where(finished, cur, new)

            if impute_finished:
                next_state = nest.map_structure(
                    _maybe_copy_state, decoder_state, state)
            else:
                next_state = decoder_state

            outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out), outputs_ta, emit)

            hit_ta = nest.map_structure(lambda ta, out: ta.write(time, out), hit_ta, next_hit)

            return (time + 1, outputs_ta, next_state, next_inputs, next_history_masking, hit_ta, next_finished,
                    next_sequence_lengths)
Example #19
 def is_initialized(self, name=None):
     if context.executing_eagerly():
         result = self._distributed_variables[0].is_initialized()
         for v in self._distributed_variables[1:-1]:
             result = math_ops.logical_and(result, v.is_initialized())
         result = math_ops.logical_and(
             result,
             self._distributed_variables[-1].is_initialized(),
             name=name)
     else:
         with ops.device(self._devices[0]):
             result = super(PackedDistributedVariable,
                            self).is_initialized(name)
         for d in self._devices[1:-1]:
             with ops.device(d):
                 initialized = super(PackedDistributedVariable,
                                     self).is_initialized(name)
             result = math_ops.logical_and(result, initialized)
         with ops.device(self._devices[-1]):
             initialized = super(PackedDistributedVariable,
                                 self).is_initialized(name)
         result = math_ops.logical_and(result, initialized, name=name)
     return result
Example #20
 def fn_with_cond(*inner_args, **inner_kwds):
   """Conditionally runs initialization if it's needed."""
   condition = True
   for wr in self._created_variables:
     variable = wr()
     if variable is None:
       raise ValueError(
           "A tf.Variable created inside your tf.function has been"
           " garbage-collected. Your code needs to keep Python references"
           " to variables created inside `tf.function`s.\n"
           "\n"
           "A common way to raise this error is to create and return a"
           " variable only referenced inside your function:\n"
           "\n"
           "@tf.function\n"
           "def f():\n"
           "  v = tf.Variable(1.0)\n"
           "  return v\n"
           "\n"
           "v = f()  # Crashes with this error message!\n"
           "\n"
           "The reason this crashes is that @tf.function annotated"
           " function returns a **`tf.Tensor`** with the **value** of the"
           " variable when the function is called rather than the"
           " variable instance itself. As such there is no code holding a"
           " reference to the `v` created inside the function and Python"
           " garbage collects it.\n"
           "\n"
           "The simplest way to fix this issue is to create variables"
           " outside the function and capture them:\n"
           "\n"
           "v = tf.Variable(1.0)\n"
           "\n"
           "@tf.function\n"
           "def f():\n"
           "  return v\n"
           "\n"
           "f()  # <tf.Tensor: ... numpy=1.>\n"
           "v.assign_add(1.)\n"
           "f()  # <tf.Tensor: ... numpy=2.>")
     condition = math_ops.logical_and(
         condition, resource_variable_ops.var_is_initialized_op(
             variable.handle))
   # We want to call stateless_fn if possible because it avoids recomputing
   # potentially expensive initializers.
   return control_flow_ops.cond(
       condition,
       lambda: self._stateless_fn(*inner_args, **inner_kwds),
       functools.partial(self._concrete_stateful_fn._filtered_call,  # pylint: disable=protected-access
                         inner_args, inner_kwds))
Example #21
def sparsemax_loss(logits, sparsemax, labels, name=None):
    """Computes sparsemax loss function [1].

  [1]: https://arxiv.org/abs/1602.02068

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
      `float64`.
    sparsemax: A `Tensor`. Must have the same type as `logits`.
    labels: A `Tensor`. Must have the same type as `logits`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """

    with ops.name_scope(name, "sparsemax_loss",
                        [logits, sparsemax, labels]) as name:
        logits = ops.convert_to_tensor(logits, name="logits")
        sparsemax = ops.convert_to_tensor(sparsemax, name="sparsemax")
        labels = ops.convert_to_tensor(labels, name="labels")

        # In the paper, they call the logits z.
        # A constant can be subtracted from the logits to make the algorithm
        # more numerically stable in theory. However, there are really no major
        # sources of numerical instability in this algorithm.
        z = logits

        # sum over support
        # Use a conditional where instead of a multiplication to support z = -inf.
        # If z = -inf, and there is no support (sparsemax = 0), a multiplication
        # would cause 0 * -inf = nan, which is not correct in this case.
        sum_s = array_ops.where(
            math_ops.logical_or(sparsemax > 0, math_ops.is_nan(sparsemax)),
            sparsemax * (z - 0.5 * sparsemax), array_ops.zeros_like(sparsemax))

        # - z_k + ||q||^2
        q_part = labels * (0.5 * labels - z)
        # Fix the case where labels = 0 and z = -inf, where q_part would
        # otherwise be 0 * -inf = nan. But since labels = 0, no cost for
        # z = -inf should be considered.
        # The code below also covers the case where z = inf. However, in this
        # case the sparsemax will be nan, which means sum_s will also be nan,
        # therefore this case doesn't need additional special treatment.
        q_part_safe = array_ops.where(
            math_ops.logical_and(math_ops.equal(labels,
                                                0), math_ops.is_inf(z)),
            array_ops.zeros_like(z), q_part)

        return math_ops.reduce_sum(sum_s + q_part_safe, axis=1)
Example #22
    def interpolate_pr_auc(self):
        """Add option to remove summary."""
        dtp = self.true_positives[:self.num_thresholds -
                                  1] - self.true_positives[1:]
        p = self.true_positives + self.false_positives
        dp = p[:self.num_thresholds - 1] - p[1:]
        prec_slope = math_ops.div_no_nan(dtp,
                                         math_ops.maximum(dp, 0),
                                         name='prec_slope')
        intercept = self.true_positives[1:] - math_ops.multiply(
            prec_slope, p[1:])

        safe_p_ratio = array_ops.where(
            math_ops.logical_and(p[:self.num_thresholds - 1] > 0, p[1:] > 0),
            math_ops.div_no_nan(p[:self.num_thresholds - 1],
                                math_ops.maximum(p[1:], 0),
                                name='recall_relative_ratio'),
            array_ops.ones_like(p[1:]))

        pr_auc_increment = math_ops.div_no_nan(
            prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),
            math_ops.maximum(
                self.true_positives[1:] + self.false_negatives[1:], 0),
            name='pr_auc_increment')

        if self.multi_label:
            by_label_auc = math_ops.reduce_sum(pr_auc_increment,
                                               name=self.name + '_by_label',
                                               axis=0)

            if self._summarize:
                if self.label_weights is None:
                    # Evenly weighted average of the label AUCs.
                    return math_ops.reduce_mean(by_label_auc, name=self.name)
                else:
                    # Weighted average of the label AUCs.
                    return math_ops.div_no_nan(math_ops.reduce_sum(
                        math_ops.multiply(by_label_auc, self.label_weights)),
                                               math_ops.reduce_sum(
                                                   self.label_weights),
                                               name=self.name)
            else:
                return by_label_auc
        else:
            if self._summarize:
                return math_ops.reduce_sum(pr_auc_increment,
                                           name='interpolate_pr_auc')
            else:
                return pr_auc_increment
Example #23
def sparsemax_loss(logits, sparsemax, labels, name=None):
  """Computes sparsemax loss function [1].

  [1]: https://arxiv.org/abs/1602.02068

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
      `float64`.
    sparsemax: A `Tensor`. Must have the same type as `logits`.
    labels: A `Tensor`. Must have the same type as `logits`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """

  with ops.name_scope(name, "sparsemax_loss",
                      [logits, sparsemax, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    sparsemax = ops.convert_to_tensor(sparsemax, name="sparsemax")
    labels = ops.convert_to_tensor(labels, name="labels")

    # In the paper, they call the logits z.
    # A constant can be subtracted from the logits to make the algorithm
    # more numerically stable in theory. However, there are really no major
    # sources of numerical instability in this algorithm.
    z = logits

    # sum over support
    # Use a conditional where instead of a multiplication to support z = -inf.
    # If z = -inf, and there is no support (sparsemax = 0), a multiplication
    # would cause 0 * -inf = nan, which is not correct in this case.
    sum_s = array_ops.where(
        math_ops.logical_or(sparsemax > 0, math_ops.is_nan(sparsemax)),
        sparsemax * (z - 0.5 * sparsemax), array_ops.zeros_like(sparsemax))

    # - z_k + ||q||^2
    q_part = labels * (0.5 * labels - z)
    # Fix the case where labels = 0 and z = -inf, where q_part would
    # otherwise be 0 * -inf = nan. But since labels = 0, no cost for
    # z = -inf should be considered.
    # The code below also covers the case where z = inf. However, in this
    # case the sparsemax will be nan, which means sum_s will also be nan,
    # therefore this case doesn't need additional special treatment.
    q_part_safe = array_ops.where(
        math_ops.logical_and(math_ops.equal(labels, 0), math_ops.is_inf(z)),
        array_ops.zeros_like(z), q_part)

    return math_ops.reduce_sum(sum_s + q_part_safe, axis=1)
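A NumPy illustration (not library code) of why the guarded where is needed: with labels = 0 and z = -inf the raw product is 0 * -inf = nan, and the select replaces exactly those entries with 0.

import numpy as np

z = np.array([-np.inf, 2.0])
labels = np.array([0.0, 1.0])
q_part = labels * (0.5 * labels - z)                 # [nan, -1.5]
q_part_safe = np.where(np.logical_and(labels == 0, np.isinf(z)),
                       np.zeros_like(z), q_part)     # [0.0, -1.5]
print(q_part, q_part_safe)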
Example #24
  def is_initialized(self, name=None):
    """Identifies if all the component variables are initialized.

    Args:
      name: Name of the final `logical_and` op.

    Returns:
      The op that evaluates to True or False depending on if all the
      component variables are initialized.
    """
    # We have to cast the self._index.values() to a `list` because when we
    # use `model_to_estimator` to run tf.keras models, self._index.values() is
    # of type `dict_values` and not `list`.
    values_list = list(self._index.values())
    result = values_list[0].is_initialized()
    # We iterate through the list of values except the last one to allow us to
    # name the final `logical_and` op the same name that is passed by the user
    # to the `is_initialized` op. For distributed variables, the
    # `is_initialized` op is a `logical_and` op.
    for v in values_list[1:-1]:
      result = math_ops.logical_and(result, v.is_initialized())
    result = math_ops.logical_and(result, values_list[-1].is_initialized(),
                                  name=name)
    return result
Example #25
  def is_initialized(self, name=None):
    """Identifies if all the component variables are initialized.

    Args:
      name: Name of the final `logical_and` op.

    Returns:
      The op that evaluates to True or False depending on if all the
      component variables are initialized.
    """
    # We have to cast the self._index.values() to a `list` because when we
    # use `model_to_estimator` to run tf.keras models, self._index.values() is
    # of type `dict_values` and not `list`.
    values_list = list(self._index.values())
    result = values_list[0].is_initialized()
    # We iterate through the list of values except the last one to allow us to
    # name the final `logical_and` op the same name that is passed by the user
    # to the `is_initialized` op. For distributed variables, the
    # `is_initialized` op is a `logical_and` op.
    for v in values_list[1:-1]:
      result = math_ops.logical_and(result, v.is_initialized())
    result = math_ops.logical_and(result, values_list[-1].is_initialized(),
                                  name=name)
    return result
Example #26
def segment_iou(predicts, labels):
    """
    :param predicts: shape [batch_size, h, w, c]
    :param labels: shape [batch_size, h, w]
    """
    num_classes = predicts.shape[-1]
    labels = array_ops.one_hot(labels, depth=num_classes)
    predicts = engine.bool(predicts)
    labels = engine.bool(labels)
    inter = engine.float32(math_ops.logical_and(predicts, labels))
    union = engine.float32(math_ops.logical_or(predicts, labels))
    inter = math_ops.reduce_sum(inter, axis=[0, 1, 2])
    union = math_ops.reduce_sum(union, axis=[0, 1, 2])
    classes_iou = inter / union
    mean_iou = math_ops.reduce_mean(classes_iou)
    return mean_iou, classes_iou
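A rough NumPy equivalent of the snippet above (assuming integer labels of shape [batch, h, w] and boolean-castable predictions of shape [batch, h, w, c]), making the per-class intersection/union explicit; classes absent from both tensors yield 0/0 = nan, as in the graph version:

import numpy as np

def segment_iou_np(predicts, labels, num_classes):
    labels_1h = np.eye(num_classes, dtype=bool)[labels]       # [b, h, w, c]
    predicts = predicts.astype(bool)
    inter = np.logical_and(predicts, labels_1h).sum(axis=(0, 1, 2))
    union = np.logical_or(predicts, labels_1h).sum(axis=(0, 1, 2))
    classes_iou = inter / union
    return classes_iou.mean(), classes_iou

labels = np.array([[[0, 1], [2, 2]]])
perfect = np.eye(3, dtype=bool)[labels]                       # predictions equal to labels
print(segment_iou_np(perfect, labels, num_classes=3))         # (1.0, [1., 1., 1.])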
Example #27
 def _process_matrix(self, matrix, min_rank, event_ndims):
   """Helper to __init__ which gets matrix in batch-ready form."""
   # Pad the matrix so that matmul works in the case of a matrix and vector
   # input. Keep track if the matrix was padded, to distinguish between a
   # rank 3 tensor and a padded rank 2 tensor.
   # TODO(srvasude): Remove side-effects from functions. It's currently unbroken
   # but error-prone since the function call order may change in the future.
   self._rank_two_event_ndims_one = math_ops.logical_and(
       math_ops.equal(array_ops.rank(matrix), min_rank),
       math_ops.equal(event_ndims, 1))
   left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
   pad = array_ops.concat(
       [array_ops.ones(
           [left], dtype=dtypes.int32), array_ops.shape(matrix)],
       0)
   return array_ops.reshape(matrix, pad)
Example #28
            def func(sliced_table, indices, min_idx, max_idx):
                # Do a serialized embedding lookup by adjusting the indices.
                adjusted_indices = indices - min_idx
                x = gen_popops_ops.ipu_multi_slice(sliced_table,
                                                   adjusted_indices,
                                                   name=name)

                # Mask out any outputs which are not in range [min_idx, max_idx).
                mask_max = indices < max_idx
                mask_min = indices >= min_idx
                mask = math_ops.logical_and(mask_max, mask_min)
                mask = array_ops.expand_dims(mask, 1)
                return array_ops.where_v2(mask,
                                          x,
                                          array_ops.constant(0, x.dtype),
                                          name=name + "/Mask")
Example #29
 def _process_matrix(self, matrix, min_rank, event_ndims):
     """Helper to __init__ which gets matrix in batch-ready form."""
     # Pad the matrix so that matmul works in the case of a matrix and vector
     # input. Keep track if the matrix was padded, to distinguish between a
     # rank 3 tensor and a padded rank 2 tensor.
     # TODO(srvasude): Remove side-effects from functions. It's currently unbroken
     # but error-prone since the function call order may change in the future.
     self._rank_two_event_ndims_one = math_ops.logical_and(
         math_ops.equal(array_ops.rank(matrix), min_rank),
         math_ops.equal(event_ndims, 1))
     left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
     pad = array_ops.concat([
         array_ops.ones([left], dtype=dtypes.int32),
         array_ops.shape(matrix)
     ], 0)
     return array_ops.reshape(matrix, pad)
Example #30
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
    """Returns whether actual_tensor's shape is expected_shape.

  Args:
    expected_shape: Integer list defining the expected shape, or tensor of same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if we already have it.
  Returns:
    New tensor.
  """
    with ops.op_scope([actual_tensor], "is_shape") as scope:
        is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
        if actual_shape is None:
            actual_shape = array_ops.shape(actual_tensor, name="actual")
        shape_equal = _all_equal(ops.convert_to_tensor(expected_shape, name="expected"), actual_shape)
        return math_ops.logical_and(is_rank, shape_equal, name=scope)
Example #31
def _UnsortedSegmentMinOrMaxGrad(op, grad):
  """ Gradient for UnsortedSegmentMin and UnsortedSegmentMax. """
  # Get the number of selected (minimum or maximum) elements in each segment.
  gathered_outputs, zero_clipped_indices, is_positive = \
      _GatherDropNegatives(op.outputs[0], op.inputs[1])
  is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
  is_selected = math_ops.logical_and(is_selected, is_positive)
  num_selected = math_ops.unsorted_segment_sum(
      math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])
  # Compute the gradient for each segment. The gradient for the ith segment is
  # divided evenly among the selected elements in that segment.
  weighted_grads = math_ops.div(grad, num_selected)
  gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None,
                                              zero_clipped_indices,
                                              is_positive)
  zeros = array_ops.zeros_like(gathered_grads)
  return array_ops.where(is_selected, gathered_grads, zeros), None, None
Example #32
    def wrapped_cond(loop_counter, maximum_iterations_arg, *args):
      """Extra `cond` wrapper that can handle the extra counter loop_var."""
      # Convert the flow variables in `args` to TensorArrays. `args` should
      # already have the same structure as `orig_loop_vars` but currently there
      # is no nest.zip so we call `_pack_sequence_as` which flattens both
      # `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays
      # and packs it into the structure of `orig_loop_vars`.
      pred = cond(*_pack_sequence_as(orig_loop_vars, args))
      if (tensor_util.is_tensor(pred) and
          (pred.shape.dims is None or pred.shape.dims)):
        pred = array_ops.squeeze_v2(pred)

      if maximum_iterations is None:
        return pred
      else:
        return math_ops.logical_and(
            loop_counter < maximum_iterations_arg, pred)
Example #33
def _UnsortedSegmentMinOrMaxGrad(op, grad):
    """ Gradient for UnsortedSegmentMin and UnsortedSegmentMax. """
    # Get the number of selected (minimum or maximum) elements in each segment.
    gathered_outputs, zero_clipped_indices, is_positive = \
        _GatherDropNegatives(op.outputs[0], op.inputs[1])
    is_selected = math_ops.equal(op.inputs[0], gathered_outputs)
    is_selected = math_ops.logical_and(is_selected, is_positive)
    num_selected = math_ops.unsorted_segment_sum(
        math_ops.cast(is_selected, grad.dtype), op.inputs[1], op.inputs[2])
    # Compute the gradient for each segment. The gradient for the ith segment is
    # divided evenly among the selected elements in that segment.
    weighted_grads = math_ops.div(grad, num_selected)
    gathered_grads, _, _ = _GatherDropNegatives(weighted_grads, None,
                                                zero_clipped_indices,
                                                is_positive)
    zeros = array_ops.zeros_like(gathered_grads)
    return array_ops.where(is_selected, gathered_grads, zeros), None, None
Example #34
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
    """Returns whether actual_tensor's shape is expected_shape.

  Note that -1 in `expected_shape` is recognized as unknown dimension.

  Args:
    expected_shape: Integer list defining the expected shape, or tensor of same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if we already have it.
  Returns:
    New tensor.
  """
    with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
        is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
        if actual_shape is None:
            actual_shape = array_ops.shape(actual_tensor, name='actual')
        shape_equal = _shape_tensor_compatible(expected_shape, actual_shape)
        return math_ops.logical_and(is_rank, shape_equal, name=scope)
Example #35
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
  """Returns whether actual_tensor's shape is expected_shape.

  Note that -1 in `expected_shape` is recognized as unknown dimension.

  Args:
    expected_shape: Integer list defining the expected shape, or tensor of same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if we already have it.
  Returns:
    New tensor.
  """
  with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
    is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
    if actual_shape is None:
      actual_shape = array_ops.shape(actual_tensor, name='actual')
    shape_equal = _shape_tensor_compatible(expected_shape, actual_shape)
    return math_ops.logical_and(is_rank, shape_equal, name=scope)
Example #36
def _tf_dataset_len(s):
  l = cardinality.cardinality(s)
  msg = gen_string_ops.string_join([
      'len requires dataset with definitive cardinality, got ',
      gen_string_ops.as_string(l)
  ])
  # TODO (yongtang): UNKNOWN is treated as an error.
  # In case there are more UNKNOWN cases for dataset, we could
  # use dataset.reduce() to find out the length (in an expensive way).
  with ops.control_dependencies([
      control_flow_ops.Assert(
          math_ops.logical_and(
              math_ops.not_equal(l, cardinality.INFINITE),
              math_ops.not_equal(l, cardinality.UNKNOWN)), [msg])
  ]):
    l = array_ops.identity(l)

  return l
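The same constraint is easier to see through the public API; a short sketch assuming TF 2.x eager execution: cardinality is a static property of the pipeline, so len can only be answered when it is neither INFINITE (-1) nor UNKNOWN (-2).

import tensorflow as tf

ds = tf.data.Dataset.range(10).batch(3)
print(tf.data.experimental.cardinality(ds).numpy())            # 4
print(tf.data.experimental.cardinality(ds.repeat()).numpy())   # -1, INFINITE_CARDINALITY
print(tf.data.experimental.cardinality(
    ds.filter(lambda x: tf.reduce_sum(x) >= 0)).numpy())       # -2, UNKNOWN_CARDINALITY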
Example #37
            def predicting_decode(i, input_data, hidden_state, predicted_ids, parent_ids,
                                  sequence_lengths, finished, scores):
                outputs, next_state, next_inputs, decoder_finished = beam_search_decoder.step(
                    i, input_data, hidden_state
                )

                next_finished = math_ops.logical_or(decoder_finished, finished)
                next_finished = math_ops.logical_or(
                    next_finished, i + 1 >= num_decoder_output)
                next_sequence_lengths = array_ops.where(
                    math_ops.logical_and(math_ops.logical_not(finished), next_finished),
                    array_ops.fill(array_ops.shape(sequence_lengths), i + 1), sequence_lengths)

                states = get_final_state(next_state)
                predicted_ids = predicted_ids.write(i, outputs.predicted_ids)
                parent_ids = parent_ids.write(i, outputs.parent_ids)
                scores = scores.write(i, outputs.scores)
                return i + 1, next_inputs, states, predicted_ids, parent_ids, \
                       next_sequence_lengths, next_finished, scores
Example #38
 def fn_with_cond(*inner_args, **inner_kwds):
   """Conditionally runs initialization if it's needed."""
   condition = True
   for wr in self._created_variables:
     variable = wr()
     if variable is None:
       raise ValueError(
           "Variable created in a tf.function garbage-collected. Code needs"
           " to keep python references to variables created in a"
           " tf.function.")
     condition = math_ops.logical_and(
         condition, resource_variable_ops.var_is_initialized_op(
             variable.handle))
   # We want to call stateless_fn if possible because it avoids recomputing
   # potentially expensive initializers.
   return control_flow_ops.cond(
       condition,
       lambda: self._stateless_fn(*inner_args, **inner_kwds),
       _call_concrete(self._concrete_stateful_fn, inner_args, inner_kwds))
Example #39
 def fn_with_cond(*inner_args, **inner_kwds):
   """Conditionally runs initialization if it's needed."""
   condition = True
   for wr in self._created_variables:
     variable = wr()
     if variable is None:
       raise ValueError(
           "Variable created in a tf.function garbage-collected. Code needs"
           " to keep python references to variables created in a"
           " tf.function.")
     condition = math_ops.logical_and(
         condition, resource_variable_ops.var_is_initialized_op(
             variable.handle))
   # We want to call stateless_fn if possible because it avoids recomputing
   # potentially expensive initializers.
   return control_flow_ops.cond(
       condition,
       lambda: self._stateless_fn(*inner_args, **inner_kwds),
       _call_concrete(self._concrete_stateful_fn, inner_args, inner_kwds))
Example #40
 def _mode(self):
   mode = (self.a - 1.)/ (self.a_b_sum - 2.)
   if self.allow_nan_stats:
     nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
     return array_ops.where(
         math_ops.logical_and(
             math_ops.greater(self.a, 1.),
             math_ops.greater(self.b, 1.)),
         mode,
         array_ops.fill(self.batch_shape(), nan, name="nan"))
   else:
     return control_flow_ops.with_dependencies([
         check_ops.assert_less(
             array_ops.ones((), dtype=self.dtype), self.a,
             message="Mode not defined for components of a <= 1."),
         check_ops.assert_less(
             array_ops.ones((), dtype=self.dtype), self.b,
             message="Mode not defined for components of b <= 1."),
     ], mode)
Example #41
def t_power(logu, t, self_normalized=False, name=None):
  """The T-Power Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the T-Power Csiszar-function is:

  ```none
  f(u) = s [ u**t - 1 - t(u - 1) ]
  s = { -1   0 < t < 1
      { +1   otherwise
  ```

  When `self_normalized = False` the `- t(u - 1)` term is omitted.

  This is similar to the `amari_alpha` Csiszar-function, with the associated
  divergence being the same up to factors depending only on `t`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    t:  `Tensor` of same `dtype` as `logu` and broadcastable shape.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """
  with ops.name_scope(name, "t_power", [logu, t]):
    logu = ops.convert_to_tensor(logu, name="logu")
    t = ops.convert_to_tensor(t, dtype=logu.dtype.base_dtype, name="t")
    fu = math_ops.expm1(t * logu)
    if self_normalized:
      fu -= t * math_ops.expm1(logu)
    fu *= array_ops.where(math_ops.logical_and(0. < t, t < 1.),
                          -array_ops.ones_like(t),
                          array_ops.ones_like(t))
    return fu
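A NumPy restatement (not library code) of the self-normalized T-Power function evaluated at u = exp(logu), with the sign flip for 0 < t < 1:

import numpy as np

def t_power_np(logu, t, self_normalized=True):
    fu = np.expm1(t * logu)                # u**t - 1
    if self_normalized:
        fu -= t * np.expm1(logu)           # subtract t * (u - 1)
    sign = np.where(np.logical_and(0.0 < t, t < 1.0), -1.0, 1.0)
    return sign * fu

print(t_power_np(np.log(2.0), t=0.5))  # ~0.0858 = -(sqrt(2) - 1 - 0.5 * (2 - 1))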
Example #42
def ordered_pair_accuracy(labels, predictions, weights=None, name=None):
    """Computes the percentage of correctedly ordered pair.

    For any pair of examples, we compare their orders determined by `labels` and
    `predictions`. They are correctly ordered if the two orders are compatible.
    That is, labels l_i > l_j and predictions s_i > s_j, and the weight for this
    pair is the weight of example i.

    Args:
      labels: A `Tensor` of the same shape as `predictions`.
      predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
        the ranking score of the corresponding example.
      weights: A `Tensor` of the same shape of predictions or [batch_size, 1]. The
        former case is per-example and the latter case is per-list.
      name: A string used as the name for this metric.

    Returns:
      A metric for the accuracy of ordered pairs.
    """
    with ops.name_scope(name, 'ordered_pair_accuracy',
                        (labels, predictions, weights)):
        clean_labels, predictions, weights, _ = _prepare_and_validate_params(
            labels, predictions, weights)
        label_valid = math_ops.equal(clean_labels, labels)
        valid_pair = math_ops.logical_and(
            array_ops.expand_dims(label_valid, 2),
            array_ops.expand_dims(label_valid, 1))
        pair_label_diff = array_ops.expand_dims(
            clean_labels, 2) - array_ops.expand_dims(clean_labels, 1)
        pair_pred_diff = array_ops.expand_dims(
            predictions, 2) - array_ops.expand_dims(predictions, 1)
        # Correct pairs are represented twice in the above pair difference tensors.
        # We only take one copy for each pair.
        # correct_pairs = math_ops.to_float(pair_label_diff > 0) * math_ops.to_float(pair_pred_diff > 0)
        correct_pairs = tf.cast(pair_label_diff > 0, tf.float32) * tf.cast(
            pair_pred_diff > 0, tf.float32)
        # pair_weights = math_ops.to_float(pair_label_diff > 0) * array_ops.expand_dims(
        #         weights, 2) * math_ops.to_float(valid_pair)
        pair_weights = tf.cast(
            pair_label_diff > 0, tf.float32) * array_ops.expand_dims(
                weights, 2) * tf.cast(valid_pair, tf.float32)
        return math_ops.reduce_mean(correct_pairs * pair_weights)
Example #43
File: beta.py  Project: zqkou/tensorflow
    def mode(self, name="mode"):
        """Mode of the distribution.

    Note that the mode for the Beta distribution is only defined
    when `a > 1`, `b > 1`. This returns the mode when `a > 1` and `b > 1`,
    and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
    will be raised rather than returning `NaN`.

    Args:
      name: The name for this op.

    Returns:
      Mode of the Beta distribution.
    """
        with ops.name_scope(self.name):
            with ops.name_scope(name, values=[self._a, self._b,
                                              self._a_b_sum]):
                a = self._a
                b = self._b
                a_b_sum = self._a_b_sum
                one = constant_op.constant(1, self.dtype)
                mode = (a - 1) / (a_b_sum - 2)

                if self.allow_nan_stats:
                    return math_ops.select(
                        math_ops.logical_and(math_ops.greater(a, 1),
                                             math_ops.greater(b, 1)), mode,
                        (constant_op.constant(float("NaN"), dtype=self.dtype) *
                         array_ops.ones_like(a_b_sum, dtype=self.dtype)))
                else:
                    return control_flow_ops.with_dependencies([
                        check_ops.assert_less(
                            one,
                            a,
                            message="mode not defined for components of a <= 1"
                        ),
                        check_ops.assert_less(
                            one,
                            b,
                            message="mode not defined for components of b <= 1"
                        )
                    ], mode)
Example #44
def _should_record_summaries_internal(default_state):
    """Returns boolean Tensor if summaries should/shouldn't be recorded.

  Now the summary condition is decided by logical "and" of two conditions:
  ctx.summary_recording and ctx.summary_recording_distribution_strategy. The
  former one is usually set by user, and the latter one is controlled by
  DistributionStrategy (tf.distribute.ReplicaContext).

  Args:
    default_state: can be True or False. The default summary behavior when user
      does not specify ctx.summary_recording and
      ctx.summary_recording_distribution_strategy is True.
  """
    ctx = context.context()
    resolve = lambda x: x() if callable(x) else x
    cond_distributed = resolve(ctx.summary_recording_distribution_strategy)
    cond = resolve(ctx.summary_recording)
    if cond is None:
        cond = default_state
    return math_ops.logical_and(cond_distributed, cond)
Example #45
 def _mode(self):
     mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
     if self.allow_nan_stats:
         nan = array_ops.fill(self.batch_shape_tensor(),
                              np.array(np.nan,
                                       dtype=self.dtype.as_numpy_dtype()),
                              name="nan")
         is_defined = math_ops.logical_and(self.concentration1 > 1.,
                                           self.concentration0 > 1.)
         return array_ops.where(is_defined, mode, nan)
     return control_flow_ops.with_dependencies([
         check_ops.assert_less(
             array_ops.ones([], dtype=self.dtype),
             self.concentration1,
             message="Mode undefined for concentration1 <= 1."),
         check_ops.assert_less(
             array_ops.ones([], dtype=self.dtype),
             self.concentration0,
             message="Mode undefined for concentration0 <= 1.")
     ], mode)
Example #46
def _should_record_summaries_internal(default_state):
  """Returns boolean Tensor if summaries should/shouldn't be recorded.

  Now the summary condition is decided by logical "and" of two conditions:
  ctx.summary_recording and ctx.summary_recording_distribution_strategy. The
  former one is usually set by user, and the latter one is controlled by
  DistributionStrategy (tf.distribute.ReplicaContext).

  Args:
    default_state: can be True or False. The default summary behavior when user
      does not specify ctx.summary_recording and
      ctx.summary_recording_distribution_strategy is True.
  """
  ctx = context.context()
  resolve = lambda x: x() if callable(x) else x
  cond_distributed = resolve(ctx.summary_recording_distribution_strategy)
  cond = resolve(ctx.summary_recording)
  if cond is None:
    cond = default_state
  return math_ops.logical_and(cond_distributed, cond)
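The control flow above (resolve a value that may be a zero-argument callable, fall back to a default, then combine the two flags with logical_and) can be sketched in plain Python; the function and argument names below are illustrative, not TensorFlow API:

def should_record(user_setting, strategy_setting, default_state):
  # Settings may be plain booleans or zero-argument callables; an unset user
  # setting (None) falls back to default_state.
  resolve = lambda x: x() if callable(x) else x
  cond_distributed = resolve(strategy_setting)
  cond = resolve(user_setting)
  if cond is None:
    cond = default_state
  return cond_distributed and cond

print(should_record(None, lambda: True, default_state=True))  # True
print(should_record(False, True, default_state=True))         # False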
Example #47
 def _mode(self):
   mode = (self.concentration1 - 1.) / (self.total_concentration - 2.)
   if self.allow_nan_stats:
     nan = array_ops.fill(
         self.batch_shape_tensor(),
         np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
         name="nan")
     is_defined = math_ops.logical_and(self.concentration1 > 1.,
                                       self.concentration0 > 1.)
     return array_ops.where(is_defined, mode, nan)
   return control_flow_ops.with_dependencies([
       check_ops.assert_less(
           array_ops.ones([], dtype=self.dtype),
           self.concentration1,
           message="Mode undefined for concentration1 <= 1."),
       check_ops.assert_less(
           array_ops.ones([], dtype=self.dtype),
           self.concentration0,
           message="Mode undefined for concentration0 <= 1.")
   ], mode)
Example #48
def t_power(logu, t, self_normalized=False, name=None):
    """The T-Power Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the T-Power Csiszar-function is:

  ```none
  f(u) = s [ u**t - 1 - t(u - 1) ]
  s = { -1   0 < t < 1
      { +1   otherwise
  ```

  When `self_normalized = False` the `- t(u - 1)` term is omitted.

  This is similar to the `amari_alpha` Csiszar-function, with the associated
  divergence being the same up to factors depending only on `t`.

  Args:
    logu: `float`-like `Tensor` representing `log(u)` from above.
    t:  `Tensor` of same `dtype` as `logu` and broadcastable shape.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    t_power_of_u: `float`-like `Tensor` of the Csiszar-function evaluated
      at `u = exp(logu)`.
  """
    with ops.name_scope(name, "t_power", [logu, t]):
        logu = ops.convert_to_tensor(logu, name="logu")
        t = ops.convert_to_tensor(t, dtype=logu.dtype.base_dtype, name="t")
        fu = math_ops.expm1(t * logu)
        if self_normalized:
            fu -= t * math_ops.expm1(logu)
        fu *= array_ops.where(math_ops.logical_and(0. < t, t < 1.),
                              -array_ops.ones_like(t), array_ops.ones_like(t))
        return fu
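To make the sign convention concrete, a small NumPy evaluation of the self-normalized T-Power function at u = exp(logu) (illustrative only, not from the original file):

import numpy as np

def t_power_np(logu, t, self_normalized=True):
  # f(u) = s * [u**t - 1 - t*(u - 1)], with s = -1 for 0 < t < 1 and +1 otherwise.
  u = np.exp(logu)
  fu = u**t - 1.0
  if self_normalized:
    fu -= t * (u - 1.0)
  s = np.where(np.logical_and(t > 0.0, t < 1.0), -1.0, 1.0)
  return s * fu

print(t_power_np(np.log(2.0), t=0.5))  # ~0.0858; note f(u=1) = 0 when self-normalized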
def _assert_sparse_indices_are_ragged_right(indices):
  """Checks that the given SparseTensor.indices tensor is ragged-right.

  Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right
  because the entry `[3, 1]` skips a cell.

  Args:
    indices: The SparseTensor indices to check.

  Returns:
    A list of control dependency op tensors.
  """
  index_prefix = indices[:, :-1]
  index_suffix = indices[:, -1]

  # Check whether each index is starting a new row in the innermost dimension
  # (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
  # (Note: this skips the first index; we will check that separately below.)
  index_prefix_changed = math_ops.reduce_any(
      math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)

  # Check two cases:
  #   * For indices that start a new row: index_suffix[i] must be zero.
  #   * For indices that continue a row: index_suffix[i] must be equal to
  #     index_suffix[i-1]+1.
  index_ok = array_ops.where(
      index_prefix_changed, math_ops.equal(index_suffix[1:], 0),
      math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))

  # Also check that the very first index didn't skip any cells.  The first
  # index starts a new row (by definition), so its suffix should be zero.
  sparse_indices_are_ragged_right = math_ops.logical_and(
      math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
      math_ops.reduce_all(index_ok))

  message = [
      'SparseTensor is not right-ragged',
      'SparseTensor.indices =', indices
  ]
  return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]
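A NumPy sketch of the same ragged-right check, run on the docstring's example (the helper name is made up for illustration):

import numpy as np

def is_ragged_right(indices):
  indices = np.asarray(indices)
  prefix, suffix = indices[:, :-1], indices[:, -1]
  # An index starts a new row when its prefix differs from the previous prefix.
  prefix_changed = np.any(prefix[1:] != prefix[:-1], axis=1)
  # Row starts need suffix == 0; continuations need suffix == previous suffix + 1.
  ok = np.where(prefix_changed, suffix[1:] == 0, suffix[1:] == suffix[:-1] + 1)
  return bool(np.all(suffix[:1] == 0) and np.all(ok))

print(is_ragged_right([[0, 0], [0, 1], [2, 0], [3, 1]]))  # False: [3, 1] skips a cell
print(is_ragged_right([[0, 0], [0, 1], [2, 0], [3, 0]]))  # True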
def _check_batch_beam(t, batch_size, beam_width):
  """Returns an Assert operation checking that the elements of the stacked
  TensorArray can be reshaped to [batch_size, beam_size, -1]. At this point,
  the TensorArray elements have a known rank of at least 1.
  """
  error_message = ("TensorArray reordering expects elements to be "
                   "reshapable to [batch_size, beam_size, -1] which is "
                   "incompatible with the dynamic shape of %s elements. "
                   "Consider setting reorder_tensor_arrays to False to disable "
                   "TensorArray reordering during the beam search."
                   % (t.name))
  rank = t.shape.ndims
  shape = array_ops.shape(t)
  if rank == 2:
    condition = math_ops.equal(shape[1], batch_size * beam_width)
  else:
    condition = math_ops.logical_or(
        math_ops.equal(shape[1], batch_size * beam_width),
        math_ops.logical_and(
            math_ops.equal(shape[1], batch_size),
            math_ops.equal(shape[2], beam_width)))
  return control_flow_ops.Assert(condition, [error_message])
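The shape condition can be illustrated with concrete numbers: with batch_size = 2 and beam_width = 3, a stacked element of shape [time, 6, ...] or [time, 2, 3, ...] passes. A hedged plain-Python sketch (helper name made up):

def check_batch_beam_shape(shape, batch_size, beam_width):
  # Mirrors the Assert condition: dim 1 equals batch_size * beam_width, or
  # dims 1 and 2 equal (batch_size, beam_width) for rank >= 3 elements.
  if len(shape) == 2:
    return shape[1] == batch_size * beam_width
  return (shape[1] == batch_size * beam_width or
          (shape[1] == batch_size and shape[2] == beam_width))

print(check_batch_beam_shape([5, 6], batch_size=2, beam_width=3))          # True
print(check_batch_beam_shape([5, 2, 3, 128], batch_size=2, beam_width=3))  # True
print(check_batch_beam_shape([5, 4, 3], batch_size=2, beam_width=3))       # False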
Example #53
def _should_record_summaries_internal(default_state):
  """Returns boolean Tensor if summaries should/shouldn't be recorded.

  Now the summary condition is decided by logical "and" of below conditions:
  First, summary writer must be set. Given this constraint is met,
  ctx.summary_recording and ctx.summary_recording_distribution_strategy.
  The former one is usually set by user, and the latter one is controlled
  by DistributionStrategy (tf.distribute.ReplicaContext).

  Args:
    default_state: can be True or False. The default summary behavior when
    summary writer is set and the user does not specify
    ctx.summary_recording and ctx.summary_recording_distribution_strategy
    is True.
  """
  if _summary_state.writer is None:
    return constant_op.constant(False)

  resolve = lambda x: x() if callable(x) else x
  cond_distributed = resolve(_summary_state.is_recording_distribution_strategy)
  cond = resolve(_summary_state.is_recording)
  if cond is None:
    cond = default_state
  return math_ops.logical_and(cond_distributed, cond)
Example #54
 def IterCondition(i, mat_m, _):
   return math_ops.logical_and(
       i < iter_count,
       math_ops.reduce_max(math_ops.abs(mat_m - identity)) > epsilon)
 def stopping_criterion(i, state):
     return math_ops.logical_and(i < max_iter,
                                 linalg_ops.norm(state.r) > tol)
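Both predicates above are typical tf.while_loop conditions: iteration continues only while the step count is below the cap and the residual (or error) is still large. A plain-Python sketch of the same combined condition (names are illustrative):

import numpy as np

def iterate(update_fn, r0, max_iter=100, tol=1e-6):
  # Continue while BOTH conditions hold, mirroring math_ops.logical_and.
  i, r = 0, np.asarray(r0, dtype=float)
  while i < max_iter and np.linalg.norm(r) > tol:
    r = update_fn(r)
    i += 1
  return i, r

# Halving the residual each step drops below tol after about 21 iterations.
steps, final_r = iterate(lambda r: r / 2.0, r0=[1.0, 1.0], tol=1e-6)
print(steps, np.linalg.norm(final_r))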
Example #56
def _ConcatGrad(op, grad):
  """Gradient for concat op."""

  def _CreateDenseMaskAndBegin(sizes, concat_dim):
    """Create variables for iteratively slicing a dense gradients tensor."""
    # Since shape is 1-D, shape_of_shape = [rank-of-inputs]
    shape_of_shape = array_ops.shape(sizes[0])
    # Make a vector of length equal to the input's dimensions,
    # with 0's everywhere and 1 in the concat dim position.
    # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
    mask = array_ops.concat(0,
                            [array_ops.fill(
                                array_ops.expand_dims(concat_dim, 0), 0),
                             [1],
                             array_ops.fill(
                                 shape_of_shape - concat_dim - 1, 0)])
    begin = array_ops.fill(shape_of_shape, 0)
    return mask, begin

  # Degenerate concatenation, just return grad.
  if len(op.inputs) == 2:
    return [None, grad]

  concat_dim = op.inputs[0]
  out_grads = []
  if isinstance(grad, ops.Tensor):
    # Get the inputs' tensor shapes
    sizes = array_ops.shape_n(op.inputs[1:])
    # pylint: disable=protected-access
    offset = gen_array_ops._concat_offset(concat_dim, sizes)
    # pylint: enable=protected-access
    for (begin, size) in zip(offset, sizes):
      out_grads.append(array_ops.slice(grad, begin, size))
  elif isinstance(grad, ops.IndexedSlices):
    concat_dim_static = tensor_util.constant_value(concat_dim)
    if concat_dim_static is None:
      raise ValueError("Can only compute IndexedSlices gradient with "
                       "statically-known concat_dim")
    # Get the inputs' tensor shapes
    sizes = [array_ops.shape(x) for x in op.inputs[1:]]
    if concat_dim_static > 0:
      # IndexedSlices, concat_dim > 0. Each input gets IndexedSlices gradients
      # with all the indices, but with grad.values sliced accordingly. This
      # is like the Tensor case, except shape(grad.values)[0] is not equal to
      # shape(sizes[i])[0], since only a subset of the dim-0 values are stored.
      mask, begin = _CreateDenseMaskAndBegin(sizes, concat_dim)
      for size in sizes:
        new_values = array_ops.slice(
            grad.values,
            begin,
            array_ops.concat(0, [[-1], array_ops.slice(size, [1], [-1])]))
        out_grads.append(
            ops.IndexedSlices(new_values, grad.indices, size))
        # Lint complains begin = begin + ...
        begin = math_ops.add(begin, size * mask)
    else:
      # IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
      # only for the relevant indices.
      start = constant_op.constant(0, dtype=grad.indices.dtype)
      for size in sizes:
        size_concat_dim = array_ops.gather(size, concat_dim)
        if size_concat_dim.dtype != grad.indices.dtype:
          size_concat_dim = math_ops.cast(size_concat_dim,
                                          dtype=grad.indices.dtype)
        end = start + size_concat_dim
        # Compute the 1-D Tensor of indices relevant for this input.
        indices_to_select = array_ops.squeeze(
            array_ops.where(math_ops.logical_and(grad.indices >= start,
                                                 grad.indices < end)),
            squeeze_dims=[1])
        new_indices = array_ops.gather(grad.indices, indices_to_select) - start
        new_values = array_ops.gather(grad.values, indices_to_select)
        out_grads.append(
            ops.IndexedSlices(new_values, new_indices, size))
        start = end
  else:
    raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))

  return [None] + out_grads
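For the dense-Tensor branch, the gradient of concat is simply the upstream gradient split back into per-input pieces along the concat axis, which is what the slicing loop (and the array_ops.split fast path in the next example) computes. A NumPy sketch of that bookkeeping (illustrative only):

import numpy as np

def concat_grad_dense(grad, input_shapes, axis):
  # Each input receives the slice of `grad` matching its extent along `axis`.
  sizes = [shape[axis] for shape in input_shapes]
  split_points = np.cumsum(sizes)[:-1]
  return np.split(grad, split_points, axis=axis)

grad = np.arange(12.0).reshape(2, 6)               # upstream gradient of the concat output
pieces = concat_grad_dense(grad, [(2, 2), (2, 4)], axis=1)
print([p.shape for p in pieces])                   # -> [(2, 2), (2, 4)]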
示例#57
0
def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
  """Gradient for concat op.

  Args:
    op: An operation.
    grad: `Tensor` or `IndexedSlices` representing the gradients with respect
      to each output of the op.
    start_value_index: An integer index of the first value in the op.inputs.
    end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of the concat_dim or axis parameter in op.inputs.

  Returns:
    Tensors representing the partial gradients with respect to each input
    of the op.

  Raises:
    ValueError: if concat_dim/axis is not statically known.
  """

  def _CreateDenseMaskAndBegin(sizes, concat_dim):
    """Create variables for iteratively slicing a dense gradients tensor."""
    # Since shape is 1-D, shape_of_shape = [rank-of-inputs]
    shape_of_shape = array_ops.shape(sizes[0])
    # Make a vector of length equal to the input's dimensions,
    # with 0's everywhere and 1 in the concat dim position.
    # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
    mask = array_ops.concat([
        array_ops.fill(array_ops.expand_dims(concat_dim, 0), 0), [1],
        array_ops.fill(shape_of_shape - concat_dim - 1, 0)
    ], 0)
    begin = array_ops.fill(shape_of_shape, 0)
    return mask, begin

  def _ExtractInputShapes(inputs):
    """Extract the shapes of a set of input tensors."""
    sizes = []
    fully_known = True
    for x in inputs:
      input_shape = array_ops.shape(x)
      if not isinstance(input_shape,
                        ops.Tensor) or input_shape.op.type != "Const":
        fully_known = False
        break
      else:
        sizes.append(input_shape)

    if fully_known:
      return sizes
    else:
      return array_ops.shape_n(inputs)

  # Degenerate concatenation, just return grad.
  if len(op.inputs) == 2:
    return grad + [None] if end_value_index <= dim_index else [None] + grad

  concat_dim = op.inputs[dim_index]
  input_values = op.inputs[start_value_index:end_value_index]
  # Using mod here for convenience since concat_dim is already verified
  # in concat implementation to be within the allowed [-rank, rank) range.
  non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])

  out_grads = []
  if isinstance(grad, ops.Tensor):
    # Get the inputs' tensor shapes
    sizes = _ExtractInputShapes(input_values)
    # The magic number of 16 was found through benchmarking a range of sizes
    # on CPUs and a Maxwell TitanX.  A speedup was seen in a large majority of
    # cases when switching implementations at N=16, but it is possible that
    # there will be a small number of performance regressions.
    # pylint: disable=protected-access
    if len(sizes) > 16:
      # extract the size of each input along the concat dimension
      sizes = array_ops.squeeze(
          array_ops.slice(
              array_ops.stack(
                  sizes, axis=1), [non_neg_concat_dim, 0], [1, -1]))
      out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
    else:
      offset = gen_array_ops._concat_offset(non_neg_concat_dim, sizes)
      for (begin, size) in zip(offset, sizes):
        out_grads.append(array_ops.slice(grad, begin, size))
    # pylint: enable=protected-access
  elif isinstance(grad, ops.IndexedSlices):
    concat_dim_static = tensor_util.constant_value(concat_dim)
    if concat_dim_static is None:
      raise ValueError("Can only compute IndexedSlices gradient with "
                       "statically-known concat_dim")
    if concat_dim_static < 0:
      rank = tensor_util.constant_value(array_ops.rank(input_values[0]))
      if rank is None:
        raise ValueError("Can only compute IndexedSlices gradient with "
                         "negative concat_dim when first value rank is "
                         "statically-known.")
      concat_dim_static %= rank
    # Get the inputs' tensor shapes
    sizes = [array_ops.shape(x) for x in input_values]
    if concat_dim_static > 0:
      # IndexedSlices, non_neg_concat_dim > 0. Each input gets IndexedSlices
      # gradients with all the indices, but with grad.values sliced accordingly.
      # This is like the Tensor case, except shape(grad.values)[0] is not equal
      # to shape(sizes[i])[0], since only a subset of the dim-0 values are
      # stored.
      mask, begin = _CreateDenseMaskAndBegin(sizes, non_neg_concat_dim)
      for size in sizes:
        new_values = array_ops.slice(
            grad.values, begin,
            array_ops.concat([[-1], array_ops.slice(size, [1], [-1])], 0))
        out_grads.append(
            ops.IndexedSlices(new_values, grad.indices, size))
        # Lint complains begin = begin + ...
        begin = math_ops.add(begin, size * mask)
    else:
      # IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
      # only for the relevant indices.
      start = constant_op.constant(0, dtype=grad.indices.dtype)
      for size in sizes:
        size_concat_dim = array_ops.gather(size, non_neg_concat_dim)
        if size_concat_dim.dtype != grad.indices.dtype:
          size_concat_dim = math_ops.cast(size_concat_dim,
                                          dtype=grad.indices.dtype)
        end = start + size_concat_dim
        # Compute the 1-D Tensor of indices relevant for this input.
        indices_to_select = array_ops.squeeze(
            array_ops.where(math_ops.logical_and(grad.indices >= start,
                                                 grad.indices < end)),
            squeeze_dims=[1])
        new_indices = array_ops.gather(grad.indices, indices_to_select) - start
        new_values = array_ops.gather(grad.values, indices_to_select)
        out_grads.append(
            ops.IndexedSlices(new_values, new_indices, size))
        start = end
  else:
    raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))

  return (out_grads + [None] if end_value_index <= dim_index
          else [None] + out_grads)
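For the IndexedSlices branch with concat_dim == 0, the logical_and is what routes each sparse row of the gradient to the input whose dim-0 range [start, end) contains its index, re-basing the indices by start. A hedged NumPy sketch of that selection (helper name made up):

import numpy as np

def split_indexed_slices(indices, values, sizes_along_dim0):
  # Route each sparse row to the input owning its dim-0 range, shifting indices.
  out, start = [], 0
  for size in sizes_along_dim0:
    end = start + size
    select = np.logical_and(indices >= start, indices < end)
    out.append((indices[select] - start, values[select]))
    start = end
  return out

indices = np.array([0, 3, 4, 6])
values = np.array([10., 20., 30., 40.])
# Three concat inputs of sizes 2, 3 and 2 along dim 0.
for new_idx, new_vals in split_indexed_slices(indices, values, [2, 3, 2]):
  print(new_idx, new_vals)
# -> [0] [10.]   then   [1 2] [20. 30.]   then   [1] [40.]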