Code Example #1
File: pruning.py Project: SylChan/tensorflow
 def maybe_update_masks():
   with ops.name_scope(self._spec.name):
     is_step_within_pruning_range = math_ops.logical_and(
         math_ops.greater_equal(self._global_step,
                                self._spec.begin_pruning_step),
         # If end_pruning_step is negative, keep pruning forever!
         math_ops.logical_or(
             math_ops.less_equal(self._global_step,
                                 self._spec.end_pruning_step),
             math_ops.less(self._spec.end_pruning_step, 0)))
     is_pruning_step = math_ops.less_equal(
         math_ops.add(self._last_update_step, self._spec.pruning_frequency),
         self._global_step)
     return math_ops.logical_and(is_step_within_pruning_range,
                                 is_pruning_step)
Code Example #2
File: linear.py Project: KalraA/tensorflow
def _softmax_cross_entropy_loss(logits, target):
  check_shape_op = control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(target), 2),
      ["target's shape should be either [batch_size, 1] or [batch_size]"])
  with ops.control_dependencies([check_shape_op]):
    target = array_ops.reshape(target, shape=[array_ops.shape(target)[0]])
  return nn.sparse_softmax_cross_entropy_with_logits(logits, target)
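Note: a minimal, self-contained sketch of what the rank check and reshape above accomplish (assuming TensorFlow 1.x graph mode; the target values are made up for illustration):

import tensorflow as tf  # assumed TensorFlow 1.x

target = tf.constant([[1], [0], [1]])  # rank 2, shape [batch_size, 1]

# The Assert only rejects rank > 2, so both [batch_size] and [batch_size, 1] pass.
check_shape_op = tf.Assert(
    tf.less_equal(tf.rank(target), 2),
    ["target's shape should be either [batch_size, 1] or [batch_size]"])
with tf.control_dependencies([check_shape_op]):
  flat_target = tf.reshape(target, shape=[tf.shape(target)[0]])

with tf.Session() as sess:
  print(sess.run(flat_target))  # [1 0 1]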
Code Example #3
 def testPositive(self):
   n = int(10e3)
   for dt in [dtypes.float16, dtypes.float32, dtypes.float64]:
     with self.cached_session():
       x = random_ops.random_gamma(shape=[n], alpha=0.001, dtype=dt, seed=0)
       self.assertEqual(0, math_ops.reduce_sum(math_ops.cast(
           math_ops.less_equal(x, 0.), dtype=dtypes.int64)).eval())
Code Example #4
File: linear.py Project: KalraA/tensorflow
def _hinge_loss(logits, target):
  check_shape_op = control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(target), 2),
      ["target's shape should be either [batch_size, 1] or [batch_size]"])
  with ops.control_dependencies([check_shape_op]):
    target = array_ops.reshape(target, shape=[array_ops.shape(target)[0], 1])
  return losses.hinge_loss(logits, target)
Code Example #5
File: check_ops.py Project: 2er0/tensorflow
def assert_less_equal(x, y, data=None, summarize=None, name=None):
  """Assert the condition `x <= y` holds element-wise.

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] <= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x:  Numeric `Tensor`.
    y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional).  Defaults to "assert_less_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x <= y` is False.
  """
  with ops.op_scope([x, y, data], name, 'assert_less_equal'):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if data is None:
      data = [
          'Condition x <= y did not hold element-wise: x = ', x.name, x, 'y = ',
          y.name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(x, y))
    return logging_ops.Assert(condition, data, summarize=summarize)
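Note: a minimal usage sketch for this assert (assuming TensorFlow 1.x graph mode and the public tf.assert_less_equal wrapper; the tensor values are made up):

import tensorflow as tf  # assumed TensorFlow 1.x

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([1.0, 2.5, 3.0])

# The assert only fires when it runs, so attach it as a control dependency
# of the ops that rely on the condition holding.
with tf.control_dependencies([tf.assert_less_equal(x, y)]):
  total = tf.reduce_sum(x)

with tf.Session() as sess:
  print(sess.run(total))  # 6.0; raises InvalidArgumentError if any x[i] > y[i]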
Code Example #6
File: crf.py Project: Jordan1237/tensorflow
 def _single_seq_fn():
   log_norm = math_ops.reduce_logsumexp(first_input, [1])
   # Mask `log_norm` of the sequences with length <= zero.
   log_norm = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                              array_ops.zeros_like(log_norm),
                              log_norm)
   return log_norm
Code Example #7
def assert_close(
    x, y, data=None, summarize=None, message=None, name="assert_close"):
  """Assert that x and y are within machine epsilon of each other.

  Args:
    x: Numeric `Tensor`
    y: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
  message = message or ""
  x = ops.convert_to_tensor(x, name="x")
  y = ops.convert_to_tensor(y, name="y")

  if x.dtype.is_integer:
    return check_ops.assert_equal(
        x, y, data=data, summarize=summarize, message=message, name=name)

  with ops.name_scope(name, "assert_close", [x, y, data]):
    tol = np.finfo(x.dtype.as_numpy_dtype).resolution
    if data is None:
      data = [
          message,
          "Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ",
          y.name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x-y), tol))
    return control_flow_ops.Assert(
        condition, data, summarize=summarize)
Code Example #8
def _filter_input(input_tensor, vocab_freq_table, vocab_min_count,
                  vocab_subsampling, corpus_size, seed):
  """Filters input tensor based on vocab freq, threshold, and subsampling."""
  if vocab_freq_table is None:
    return input_tensor

  if not isinstance(vocab_freq_table, lookup.InitializableLookupTableBase):
    raise ValueError(
        "vocab_freq_table must be a subclass of "
        "InitializableLookupTableBase (such as HashTable) instead of type "
        "{}.".format(type(vocab_freq_table)))

  with ops.name_scope(
      "filter_vocab", values=[vocab_freq_table, input_tensor, vocab_min_count]):
    freq = vocab_freq_table.lookup(input_tensor)
    # Filters out elements in input_tensor that are not found in
    # vocab_freq_table (table returns a default value of -1 specified above when
    # an element is not found).
    mask = math_ops.not_equal(freq, vocab_freq_table.default_value)

    # Filters out elements whose vocab frequencies are less than the threshold.
    if vocab_min_count is not None:
      cast_threshold = math_ops.cast(vocab_min_count, freq.dtype)
      mask = math_ops.logical_and(mask,
                                  math_ops.greater_equal(freq, cast_threshold))

    input_tensor = array_ops.boolean_mask(input_tensor, mask)
    freq = array_ops.boolean_mask(freq, mask)

  if not vocab_subsampling:
    return input_tensor

  if vocab_subsampling < 0 or vocab_subsampling > 1:
    raise ValueError(
        "Invalid vocab_subsampling={} - it should be within range [0, 1].".
        format(vocab_subsampling))

  # Subsamples the input tokens based on vocabulary frequency and
  # vocab_subsampling threshold (ie randomly discard commonly appearing
  # tokens).
  with ops.name_scope(
      "subsample_vocab", values=[input_tensor, freq, vocab_subsampling]):
    corpus_size = math_ops.cast(corpus_size, dtypes.float64)
    freq = math_ops.cast(freq, dtypes.float64)
    vocab_subsampling = math_ops.cast(vocab_subsampling, dtypes.float64)

    # From tensorflow_models/tutorials/embedding/word2vec_kernels.cc, which is
    # supposed to correlate with Eq. 5 in http://arxiv.org/abs/1310.4546.
    keep_prob = ((math_ops.sqrt(freq /
                                (vocab_subsampling * corpus_size)) + 1.0) *
                 (vocab_subsampling * corpus_size / freq))
    random_prob = random_ops.random_uniform(
        array_ops.shape(freq),
        minval=0,
        maxval=1,
        dtype=dtypes.float64,
        seed=seed)

    mask = math_ops.less_equal(random_prob, keep_prob)
    return array_ops.boolean_mask(input_tensor, mask)
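Note: a rough NumPy sketch of the keep-probability curve computed above (the corpus size, threshold, and token frequencies are made up for illustration):

import numpy as np

corpus_size = 1_000_000.0                     # hypothetical total token count
vocab_subsampling = 1e-3                      # hypothetical threshold
freq = np.array([50.0, 5_000.0, 200_000.0])   # rare, medium, very common tokens

ratio = vocab_subsampling * corpus_size / freq
keep_prob = (np.sqrt(1.0 / ratio) + 1.0) * ratio

# A token is kept when a uniform draw is <= keep_prob, so very frequent tokens
# end up with small keep probabilities while rare tokens are always kept.
print(np.minimum(keep_prob, 1.0))             # ~[1.0, 0.65, 0.08]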
Code Example #9
File: head.py Project: HKUST-SING/tensorflow
 def loss_fn(logits, labels):
   check_shape_op = control_flow_ops.Assert(
       math_ops.less_equal(array_ops.rank(labels), 2),
       ["labels shape should be either [batch_size, 1] or [batch_size]"])
   with ops.control_dependencies([check_shape_op]):
     labels = array_ops.reshape(
         labels, shape=[array_ops.shape(labels)[0], 1])
   return losses.hinge_loss(logits, labels)
Code Example #10
File: linear.py Project: KalraA/tensorflow
def _log_loss_with_two_classes(logits, target):
  check_shape_op = control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(target), 2),
      ["target's shape should be either [batch_size, 1] or [batch_size]"])
  with ops.control_dependencies([check_shape_op]):
    target = array_ops.reshape(target, shape=[array_ops.shape(target)[0], 1])
  return nn.sigmoid_cross_entropy_with_logits(
      logits, math_ops.to_float(target))
Code Example #11
File: head.py Project: lsuhpchelp/tensorflow_4qb2
 def _loss_fn(logits, labels):
     check_shape_op = control_flow_ops.Assert(
         math_ops.less_equal(array_ops.rank(labels), 2), [
             "labels shape should be either [batch_size, 1] or [batch_size]"
         ])
     with ops.control_dependencies([check_shape_op]):
         labels = array_ops.reshape(
             labels, shape=[array_ops.shape(labels)[0], 1])
     return losses.hinge_loss(logits, labels)
Code Example #12
 def _loss_fn(logits, labels):
   with ops.name_scope(None, "hinge_loss", (logits, labels)) as name:
     check_shape_op = control_flow_ops.Assert(
         math_ops.less_equal(array_ops.rank(labels), 2),
         ("labels shape should be either [batch_size, 1] or [batch_size]",))
     with ops.control_dependencies((check_shape_op,)):
       labels = array_ops.reshape(
           labels, shape=(array_ops.shape(labels)[0], 1))
     return losses.hinge_loss(logits, labels, scope=name)
Code Example #13
File: crf.py Project: RyomaGuan/nlp
 def _single_seq_fn():
     batch_size = array_ops.shape(inputs, out_type=tag_indices.dtype)[0]  # (2)
     example_inds = array_ops.reshape(math_ops.range(batch_size, dtype=tag_indices.dtype), [-1, 1])  # [[0], [1]]
     sequence_scores = array_ops.gather_nd(array_ops.squeeze(inputs, [1]),
                                           array_ops.concat([example_inds, tag_indices], axis=1))
     sequence_scores = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                                       array_ops.zeros_like(sequence_scores),
                                       sequence_scores)
     return sequence_scores
Code Example #14
File: pruning_utils.py Project: Ajaycs99/tensorflow
 def loop_body(loop_count, cdf):
   temp = math_ops.reduce_sum(
       math_ops.cast(
           math_ops.less_equal(indices, loop_count), dtypes.float32))
   cdf = math_ops.add(
       cdf,
       array_ops.one_hot(
           loop_count, depth=nbins, on_value=temp, off_value=0.0))
   return [loop_count + 1, cdf]
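Note: this body is presumably driven by a while_loop over the histogram bins; a self-contained sketch under that assumption (the indices and nbins values are made up) could be:

import tensorflow as tf  # assumed TensorFlow 1.x graph mode
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops, control_flow_ops, math_ops

nbins = 4
indices = tf.constant([0, 0, 1, 3])  # hypothetical histogram bin indices

def loop_body(loop_count, cdf):
  # Count how many indices fall at or below the current bin.
  temp = math_ops.reduce_sum(
      math_ops.cast(math_ops.less_equal(indices, loop_count), dtypes.float32))
  cdf = math_ops.add(
      cdf,
      array_ops.one_hot(loop_count, depth=nbins, on_value=temp, off_value=0.0))
  return [loop_count + 1, cdf]

_, cdf = control_flow_ops.while_loop(
    lambda i, _: math_ops.less(i, nbins),
    loop_body,
    [tf.constant(0), array_ops.zeros([nbins])])

with tf.Session() as sess:
  print(sess.run(cdf))  # [2. 3. 3. 4.], the cumulative counts per bin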
Code Example #15
File: target_column.py Project: zhaosv/tensorflow
 def loss_fn(logits, target):
     check_shape_op = control_flow_ops.Assert(
         math_ops.less_equal(array_ops.rank(target), 2), [
             "target's shape should be either [batch_size, 1] or [batch_size]"
         ])
     with ops.control_dependencies([check_shape_op]):
         target = array_ops.reshape(
             target, shape=[array_ops.shape(target)[0], 1])
     return loss_ops.hinge_loss(logits, target)
Code Example #16
 def loop_body(loop_count, cdf):
   temp = math_ops.reduce_sum(
       math_ops.cast(
           math_ops.less_equal(indices, loop_count), dtypes.float32))
   cdf = math_ops.add(
       cdf,
       array_ops.one_hot(
           loop_count, depth=nbins, on_value=temp, off_value=0.0))
   return [loop_count + 1, cdf]
Code Example #17
def _log_loss_with_two_classes(logits, target):
    check_shape_op = control_flow_ops.Assert(
        math_ops.less_equal(array_ops.rank(target), 2),
        ["target's shape should be either [batch_size, 1] or [batch_size]"])
    with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(target,
                                   shape=[array_ops.shape(target)[0], 1])
    return nn.sigmoid_cross_entropy_with_logits(logits,
                                                math_ops.to_float(target))
Code Example #18
def get_backwards_probabilities(inputs, sequence_lengths, transitions):
    '''
    CRF backwards probabilities and log normalizer.

    inputs: bs x L x V unaries
    sequence_lengths: bs
    transitions: An object implementing CRF transitions

    returns: bs x L and bs
    '''

    batch_size = array_ops.shape(inputs)[0]

    # Split up the first and rest of the inputs in preparation for the forward
    # algorithm.
    first_input = inputs[:, 0, :]

    num_tags = transitions.num_tags

    pairwise = transitions.pack_to_parameter_sequence()
    rest_of_pairwise = pairwise[:, 1:, :]

    rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

    sequence_lengths_minus_one = math_ops.maximum(
        array_ops.constant(0, dtype=sequence_lengths.dtype),
        sequence_lengths - 1)

    # Compute the alpha values in the forward algorithm in order to get the
    # partition function.
    forward_cell = CrfBackwardsRnnCell(transitions)
    # Sequence length is not allowed to be less than zero.
    #

    concatenated_rest_of_input = array_ops.concat(
        [rest_of_input, rest_of_pairwise], axis=2)
    reversed_concatenated_rest_of_input = reverse_and_repad(
        concatenated_rest_of_input, sequence_lengths_minus_one, 0)

    initial_state = array_ops.zeros([batch_size, num_tags],
                                    dtype=dtypes.float32)

    all_betas, betas = rnn.dynamic_rnn(
        cell=forward_cell,
        inputs=reversed_concatenated_rest_of_input,
        sequence_length=sequence_lengths_minus_one,
        initial_state=initial_state,
        dtype=dtypes.float32)
    log_norm = math_ops.reduce_logsumexp(first_input + betas, [1])
    # Mask `log_norm` of the sequences with length <= zero.
    log_norm = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                               array_ops.zeros_like(log_norm), log_norm)

    all_betas = reverse_and_repad(all_betas, sequence_lengths_minus_one, 0)

    return all_betas, log_norm
Code Example #19
def _reshape_targets(targets):
    if targets is None:
        return None
    check_shape_op = control_flow_ops.Assert(
        math_ops.less_equal(array_ops.rank(targets), 2),
        ["target's should be either [batch_size, n_labels] or [batch_size]"])
    with ops.control_dependencies([check_shape_op]):
        targets = array_ops.reshape(targets,
                                    shape=[array_ops.shape(targets)[0], -1])
    return targets
Code Example #20
def _reshape_targets(targets):
  if targets is None:
    return None
  check_shape_op = control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(targets), 2),
      ["target's should be either [batch_size, n_labels] or [batch_size]"])
  with ops.control_dependencies([check_shape_op]):
    targets = array_ops.reshape(
        targets, shape=[array_ops.shape(targets)[0], -1])
  return targets
Code Example #21
File: dnn.py Project: MrCrumpets/tensorflow
def _reshape_targets(targets):
  """Reshapes targets into [batch_size, 1] to be compatible with logits."""
  check_shape_op = control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(targets), 2),
      ["targets shape should be either [batch_size, 1] or [batch_size]"])
  with ops.control_dependencies([check_shape_op]):
    targets = array_ops.reshape(targets,
                                shape=[array_ops.shape(targets)[0], 1])

  return targets
Code Example #22
def _reshape_labels(labels):
    """Reshapes labels into [batch_size, 1] to be compatible with logits."""
    check_shape_op = control_flow_ops.Assert(
        math_ops.less_equal(array_ops.rank(labels), 2),
        ["labels shape should be either [batch_size, 1] or [batch_size]"])
    with ops.control_dependencies([check_shape_op]):
        labels = array_ops.reshape(labels,
                                   shape=[array_ops.shape(labels)[0], 1])

    return labels
Code Example #23
 def _loss_fn(logits, labels):
     with ops.name_scope(None, "hinge_loss", (logits, labels)) as name:
         check_shape_op = control_flow_ops.Assert(
             math_ops.less_equal(array_ops.rank(labels), 2),
             ("labels shape should be either [batch_size, 1] or [batch_size]",
              ))
         with ops.control_dependencies((check_shape_op, )):
             labels = array_ops.reshape(
                 labels, shape=(array_ops.shape(labels)[0], 1))
         return losses.hinge_loss(logits, labels, scope=name)
Code Example #24
def pairwise_distance(feature, squared=False):
    """Computes the pairwise distance matrix with numerical stability.

    output[i, j] = || feature[i, :] - feature[j, :] ||_2

    Args:
      feature: 2-D Tensor of size [number of data, feature dimension].
      squared: Boolean, whether or not to square the pairwise distances.

    Returns:
      pairwise_distances: 2-D Tensor of size [number of data, number of data].
    """
    pairwise_distances_squared = (math_ops.add(
        math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
        math_ops.reduce_sum(math_ops.square(array_ops.transpose(feature)),
                            axis=[0],
                            keepdims=True),
    ) - 2.0 * math_ops.matmul(feature, array_ops.transpose(feature)))

    # Deal with numerical inaccuracies. Set small negatives to zero.
    pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared,
                                                  0.0)
    # Get the mask where the zero distances are at.
    error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)

    # Optionally take the sqrt.
    if squared:
        pairwise_distances = pairwise_distances_squared
    else:
        pairwise_distances = math_ops.sqrt(pairwise_distances_squared +
                                           math_ops.to_float(error_mask) *
                                           1e-16)

    # Undo conditionally adding 1e-16.
    pairwise_distances = math_ops.multiply(
        pairwise_distances,
        math_ops.to_float(math_ops.logical_not(error_mask)))

    # num_data = array_ops.shape(feature)[0]

    # Explicitly set diagonals to zero.

    # import utool as ut
    # ut.embed()
    # mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
    #     array_ops.ones([num_data])
    # )
    mask_offdiagonals = array_ops.ones_like(
        pairwise_distances) - array_ops.diag(
            array_ops.ones(array_ops.shape(feature))[:, 0])

    pairwise_distances = math_ops.multiply(pairwise_distances,
                                           mask_offdiagonals)
    return pairwise_distances
Code Example #25
 def _crf_log_norm(self, inputs, seq_lens):
     first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
     first_input = array_ops.squeeze(first_input, [1])
     rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])
     forward_cell = CrfForwardRnnCell(self.transition_params)
     seq_lens_less_one = math_ops.maximum(constant_op.constant(0, dtype=seq_lens.dtype), seq_lens - 1)
     _, alphas = rnn.dynamic_rnn(cell=forward_cell, inputs=rest_of_input, sequence_length=seq_lens_less_one,
                                 initial_state=first_input, dtype=dtypes.float32)
     log_norm = math_ops.reduce_logsumexp(alphas, [1])
     log_norm = array_ops.where(math_ops.less_equal(seq_lens, 0), array_ops.zeros_like(log_norm), log_norm)
     return log_norm
Code Example #26
File: crf.py Project: Jordan1237/tensorflow
 def _single_seq_fn():
   batch_size = array_ops.shape(inputs, out_type=tag_indices.dtype)[0]
   example_inds = array_ops.reshape(
       math_ops.range(batch_size, dtype=tag_indices.dtype), [-1, 1])
   sequence_scores = array_ops.gather_nd(
       array_ops.squeeze(inputs, [1]),
       array_ops.concat([example_inds, tag_indices], axis=1))
   sequence_scores = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                                     array_ops.zeros_like(sequence_scores),
                                     sequence_scores)
   return sequence_scores
Code Example #27
def _assert_close(x, y, data=None, summarize=None, name=None):
    if x.dtype.is_integer:
        return check_ops.assert_equal(x, y, data=data, summarize=summarize, name=name)

    with ops.op_scope([x, y, data], name, "assert_close"):
        x = ops.convert_to_tensor(x, name="x")
        y = ops.convert_to_tensor(y, name="y")
        tol = np.finfo(x.dtype.as_numpy_dtype).resolution
        if data is None:
            data = ["Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ", y.name, y]
        condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x - y), tol))
        return logging_ops.Assert(condition, data, summarize=summarize)
Code Example #28
def pairwise_distance_euclidean(a, b=None, squared=False):
    """Computes the pairwise distance matrix with numerical stability.

  output[i, j] = || a[i, :] - b[j, :] ||_2

  Args:
    a: 2-D Tensor of size [number of a, feature dimension].
    b: 2-D Tensor of size [number of b, feature dimension].
    squared: Boolean, whether or not to square the pairwise distances.

  Returns:
    pairwise_distances: 2-D Tensor of size [number of a, number of b].
  """
    b_was_none = False
    if b is None:
        b_was_none = True
        b = tf.identity(a)

    transpose_b = tf.transpose(b)

    pairwise_distances_squared = math_ops.add(
        math_ops.reduce_sum(math_ops.square(a), axis=[1], keepdims=True), # [len_a, 1]
        math_ops.reduce_sum(math_ops.square(transpose_b), axis=[0], keepdims=True)) -\
        2.0 * math_ops.matmul(a, transpose_b) # [len_a, len_b]

    # Deal with numerical inaccuracies. Set small negatives to zero.
    pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared,
                                                  0.0)
    # Get the mask where the zero distances are at.
    error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)

    # Optionally take the sqrt.
    if squared:
        pairwise_distances = pairwise_distances_squared
    else:
        pairwise_distances = math_ops.sqrt(pairwise_distances_squared +
                                           math_ops.to_float(error_mask) *
                                           1e-16)

    # Undo conditionally adding 1e-16.
    pairwise_distances = math_ops.multiply(
        pairwise_distances,
        math_ops.to_float(math_ops.logical_not(error_mask)))

    # If b was None, Explicitly set diagonals to zero.
    if b_was_none:
        num_data = array_ops.shape(a)[0]
        mask_offdiagonals = array_ops.ones_like(
            pairwise_distances) - array_ops.diag(array_ops.ones([num_data]))
        pairwise_distances = math_ops.multiply(pairwise_distances,
                                               mask_offdiagonals)

    return pairwise_distances
Code Example #29
File: batching.py Project: fraudies/tensorflow
 def max_reduce_fn(state, value):
   """Computes the maximum shape to pad to."""
   condition = math_ops.reduce_all(
       math_ops.logical_or(
           math_ops.less_equal(value.dense_shape, padded_shape),
           math_ops.equal(padded_shape, -1)))
   assert_op = control_flow_ops.Assert(condition, [
       "Actual shape greater than padded shape: ", value.dense_shape,
       padded_shape
   ])
   with ops.control_dependencies([assert_op]):
     return math_ops.maximum(state, value.dense_shape)
Code Example #30
File: batching.py Project: ZhangXinNan/tensorflow
 def max_reduce_fn(state, value):
   """Computes the maximum shape to pad to."""
   condition = math_ops.reduce_all(
       math_ops.logical_or(
           math_ops.less_equal(value.dense_shape, padded_shape),
           math_ops.equal(padded_shape, -1)))
   assert_op = control_flow_ops.Assert(condition, [
       "Actual shape greater than padded shape: ", value.dense_shape,
       padded_shape
   ])
   with ops.control_dependencies([assert_op]):
     return math_ops.maximum(state, value.dense_shape)
Code Example #31
    def element_to_bucket_id(*args, _ratio_boundaries, _size_boundaries, _ratio_func, _size_func):
        size = tf.cast(_size_func(*args), tf.float32)

        buckets_min = np.concatenate(([np.iinfo(np.int32).min], _size_boundaries)).astype(np.float32)
        buckets_max = np.concatenate((_size_boundaries, [np.iinfo(np.int32).max])).astype(np.float32)
        conditions_c = math_ops.logical_and(
            math_ops.less_equal(buckets_min, size),
            math_ops.less(size, buckets_max))
        size_id = math_ops.reduce_min(array_ops.where(conditions_c))

        ratio = tf.cast(_ratio_func(*args), tf.float32)

        # bucket_key = tf.cast(bucket_id, tf.int64)
        # tf.gather(tf.cast(ratio_boundaries, tf.int64), bucket_id)

        # Pad the ragged ratio boundaries so every size bucket has the same
        # number of ratio buckets.
        max_length = max(len(row) for row in _ratio_boundaries)
        ratio_boundaries_padded = np.array(
            [row + [np.finfo(np.float32).max] * (max_length - len(row))
             for row in _ratio_boundaries])

        offset_id = size_id * (len(ratio_boundaries_padded[0]) + 1)

        buckets_min = tf.concat(
            [[np.finfo(np.float32).min],
             tf.gather(ratio_boundaries_padded, size_id - 1)], axis=0)
        # buckets_max = np.concatenate((_ratio_boundaries, [np.iinfo(np.int32).max])).astype(np.float32)
        buckets_min = tf.cast(buckets_min, tf.float32)

        buckets_max = tf.concat(
            [tf.gather(ratio_boundaries_padded, size_id - 1),
             [np.finfo(np.float32).max]], axis=0)
        buckets_max = tf.cast(buckets_max, tf.float32)

        conditions_c = math_ops.logical_and(
            math_ops.less_equal(buckets_min, ratio),
            math_ops.less(ratio, buckets_max))
        ratio_id = math_ops.reduce_min(array_ops.where(conditions_c))

        return offset_id + ratio_id
Code Example #32
File: random_gamma_test.py Project: qwerzou1/shibie
 def testPositive(self):
     n = int(10e3)
     for dt in [dtypes.float16, dtypes.float32, dtypes.float64]:
         with self.cached_session():
             x = random_ops.random_gamma(shape=[n],
                                         alpha=0.001,
                                         dtype=dt,
                                         seed=0)
             self.assertEqual(
                 0,
                 math_ops.reduce_sum(
                     math_ops.cast(math_ops.less_equal(x, 0.),
                                   dtype=dtypes.int64)).eval())
Code Example #33
def _binary_hinge_loss(logits, target):
    """Method that returns the loss vector for binary hinge loss."""
    check_shape_op = logging_ops.Assert(
        math_ops.less_equal(array_ops.rank(target), 2),
        ["target's shape should be either [batch_size, 1] or [batch_size]"],
    )
    with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(target, shape=[array_ops.shape(target)[0], 1])
    # First need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(logits)
    labels = math_ops.sub(2 * math_ops.to_float(target), all_ones)
    loss_vec = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits)))
    return loss_vec
Code Example #34
        def element_to_bucket_id(*args):
            """Return int64 id of the length bucket for this element."""
            seq_length = element_length_func(*args)

            boundaries = list(bucket_boundaries)
            buckets_min = [np.iinfo(np.int32).min] + boundaries
            buckets_max = boundaries + [np.iinfo(np.int32).max]
            conditions_c = math_ops.logical_and(
                math_ops.less_equal(buckets_min, seq_length),
                math_ops.less(seq_length, buckets_max))
            bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))

            return bucket_id
Code Example #35
File: grouping.py Project: bunbutter/tensorflow
    def element_to_bucket_id(*args):
      """Return int64 id of the length bucket for this element."""
      seq_length = element_length_func(*args)

      boundaries = list(bucket_boundaries)
      buckets_min = [np.iinfo(np.int32).min] + boundaries
      buckets_max = boundaries + [np.iinfo(np.int32).max]
      conditions_c = math_ops.logical_and(
          math_ops.less_equal(buckets_min, seq_length),
          math_ops.less(seq_length, buckets_max))
      bucket_id = math_ops.reduce_min(array_ops.where(conditions_c))

      return bucket_id
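Note: a small sketch of how these bucket boundaries resolve to an id for one element (assuming TensorFlow 1.x graph mode; the boundaries and length are made up):

import numpy as np
import tensorflow as tf  # assumed TensorFlow 1.x

bucket_boundaries = [10, 20, 30]   # hypothetical length boundaries
seq_length = tf.constant(17)

buckets_min = [np.iinfo(np.int32).min] + bucket_boundaries
buckets_max = bucket_boundaries + [np.iinfo(np.int32).max]

# A length l lands in bucket i when buckets_min[i] <= l < buckets_max[i].
conditions_c = tf.logical_and(
    tf.less_equal(buckets_min, seq_length),
    tf.less(seq_length, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions_c))

with tf.Session() as sess:
  print(sess.run(bucket_id))  # 1, i.e. the [10, 20) bucket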
Code Example #36
def assert_less_equal(x,
                      y,
                      data=None,
                      summarize=None,
                      message=None,
                      name=None):
    """Assert the condition `x <= y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_less_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] <= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x:  Numeric `Tensor`.
    y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).  Defaults to "assert_less_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x <= y` is False.
  """
    message = message or ''
    with ops.name_scope(name, 'assert_less_equal', [x, y, data]):
        x = ops.convert_to_tensor(x, name='x')
        y = ops.convert_to_tensor(y, name='y')
        if context.executing_eagerly():
            x_name = _shape_and_dtype_str(x)
            y_name = _shape_and_dtype_str(y)
        else:
            x_name = x.name
            y_name = y.name

        if data is None:
            data = [
                message,
                'Condition x <= y did not hold element-wise:'
                'x (%s) = ' % x_name, x,
                'y (%s) = ' % y_name, y
            ]
        condition = math_ops.reduce_all(math_ops.less_equal(x, y))
        return control_flow_ops.Assert(condition, data, summarize=summarize)
Code Example #37
def cosine_decay(
    learn_rate,  # learning rate
    epoch,  # epoch
    batch,  # batch epoch
    decay,  # decay
    decay_min_fraction,
    alpha,  # alpha
    epochs,
    final_epochs,  # finalepoch
    delay=0,
    name=None,
):
    with ops.name_scope(name, "LR_Finetune", [learn_rate, epoch]):
        # learning_rate = ops.convert_to_tensor(
        #     learning_rate, name="initial_learning_rate")
        learn_rate = ops.convert_to_tensor(learn_rate,
                                           name="initial_learning_rate")
        dtype = tf.float32
        learn_rate = math_ops.cast(learn_rate, dtype)
        batch = math_ops.cast(batch, dtype)
        final_epochs = math_ops.cast(final_epochs, dtype)
        alpha = math_ops.cast(alpha, dtype)
        decay = math_ops.cast(decay, dtype)
        epoch = math_ops.cast(epoch, dtype)
        completed_fraction = (epoch - delay) / batch
        # cond takes exactly one true_fn and one false_fn: before the delay use
        # the base rate, afterwards apply the step decay floored at
        # decay_min_fraction.
        lam = control_flow_ops.cond(
            math_ops.less_equal(epoch, delay),
            lambda: learn_rate,
            lambda: learn_rate * tf.math.maximum(
                decay**math_ops.floor(completed_fraction), decay_min_fraction),
        )
        return control_flow_ops.cond(
            math_ops.less_equal(epoch, epochs - final_epochs),
            lambda: lam,
            lambda: lam * (alpha + (1 - alpha) * (0.5 + 0.5 * math_ops.cos(
                (epoch - epochs + final_epochs) / final_epochs * 3.14159))),
        )
Code Example #38
def _binary_hinge_loss(logits, target):
    """Method that returns the loss vector for binary hinge loss."""
    check_shape_op = logging_ops.Assert(
        math_ops.less_equal(array_ops.rank(target), 2),
        ["target's shape should be either [batch_size, 1] or [batch_size]"])
    with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(target,
                                   shape=[array_ops.shape(target)[0], 1])
    # First need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(logits)
    labels = math_ops.sub(2 * math_ops.to_float(target), all_ones)
    loss_vec = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels,
                                                               logits)))
    return loss_vec
Code Example #39
    def pair_weights(self, sorted_labels):
        """See `_LambdaWeight`.

    The current implementation here is that for any pairs of documents i and j,
    we set the weight to be 1 if
      - i and j have different labels.
      - i <= topn and j > topn or i > topn and j <= topn.
    This is exactly the same as the original LambdaRank method. The weight is
    the gain of swapping a pair of documents.

    Args:
      sorted_labels: A dense `Tensor` of labels with shape [batch_size,
        list_size] that are sorted by logits.

    Returns:
      A `Tensor` that can weight example pairs.
    """
        with ops.name_scope(None, 'precision_lambda_weight',
                            (sorted_labels, )):
            valid_pair, sorted_labels = self._get_valid_pairs_and_clean_labels(
                sorted_labels)
            binary_labels = math_ops.to_float(self._positive_fn(sorted_labels))
            label_diff = math_ops.abs(
                array_ops.expand_dims(binary_labels, 2) -
                array_ops.expand_dims(binary_labels, 1))
            label_diff *= math_ops.to_float(valid_pair)
            # i <= topn and j > topn or i > topn and j <= topn, i.e., xor(i <= topn, j
            # <= topn).
            list_size = array_ops.shape(sorted_labels)[1]
            rank = math_ops.range(list_size) + 1
            rank_mask = math_ops.logical_xor(
                array_ops.expand_dims(math_ops.less_equal(rank, self._topn),
                                      1),
                array_ops.expand_dims(math_ops.less_equal(rank, self._topn),
                                      0))
            return label_diff * math_ops.to_float(rank_mask)
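Note: the xor-of-cutoffs trick above can be seen in isolation with a tiny list (assuming TensorFlow 1.x graph mode; topn and the list size are made up):

import tensorflow as tf  # assumed TensorFlow 1.x

topn = 2
rank = tf.range(4) + 1  # ranks 1..4

# xor(i <= topn, j <= topn): exactly one document of the pair is inside the top-n cut.
rank_mask = tf.logical_xor(
    tf.expand_dims(tf.less_equal(rank, topn), 1),
    tf.expand_dims(tf.less_equal(rank, topn), 0))

with tf.Session() as sess:
  print(sess.run(tf.cast(rank_mask, tf.int32)))
  # [[0 0 1 1]
  #  [0 0 1 1]
  #  [1 1 0 0]
  #  [1 1 0 0]]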
Code Example #40
        def _wr_initializer(shape, dtype, partition_info=None):
            wr = wr_init(shape, dtype=dtype)

            connectivity_mask = math_ops.cast(
                math_ops.less_equal(random_ops.random_uniform(shape),
                                    connectivity), dtype)

            wr = math_ops.multiply(wr, connectivity_mask)

            wr_norm2 = math_ops.sqrt(math_ops.reduce_sum(math_ops.square(wr)))

            is_norm_0 = math_ops.cast(math_ops.equal(wr_norm2, 0), dtype)

            wr = wr * wr2_scale / (wr_norm2 + 1 * is_norm_0)

            return wr
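Note: the connectivity masking above reduces to comparing uniform noise against a sparsity level; a minimal sketch (assuming TensorFlow 1.x graph mode; the connectivity value and shape are made up):

import tensorflow as tf  # assumed TensorFlow 1.x

connectivity = 0.2   # hypothetical fraction of recurrent weights to keep
shape = [4, 4]

# Each position is kept when its uniform draw in [0, 1) is <= connectivity,
# so roughly that fraction of entries survives.
mask = tf.cast(tf.less_equal(tf.random_uniform(shape), connectivity), tf.float32)

with tf.Session() as sess:
  print(sess.run(mask))  # ~20% ones, the rest zeros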
Code Example #41
def pairwise_distance(feature, squared=False):
  """Computes the pairwise distance matrix with numerical stability.

  output[i, j] = || feature[i, :] - feature[j, :] ||_2

  Args:
    feature: 2-D Tensor of size [number of data, feature dimension].
    squared: Boolean, whether or not to square the pairwise distances.

  Returns:
    pairwise_distances: 2-D Tensor of size [number of data, number of data].
  """
  pairwise_distances_squared = math_ops.add(
      math_ops.reduce_sum(
          math_ops.square(feature),
          axis=[1],
          keepdims=True),
      math_ops.reduce_sum(
          math_ops.square(
              array_ops.transpose(feature)),
          axis=[0],
          keepdims=True)) - 2.0 * math_ops.matmul(
              feature, array_ops.transpose(feature))

  # Deal with numerical inaccuracies. Set small negatives to zero.
  pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
  # Get the mask where the zero distances are at.
  error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)

  # Optionally take the sqrt.
  if squared:
    pairwise_distances = pairwise_distances_squared
  else:
    pairwise_distances = math_ops.sqrt(
        pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)

  # Undo conditionally adding 1e-16.
  pairwise_distances = math_ops.multiply(
      pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))

  num_data = array_ops.shape(feature)[0]
  # Explicitly set diagonals to zero.
  mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
      array_ops.ones([num_data]))
  pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
  return pairwise_distances
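Note: a minimal call sketch for the helper above (assuming TensorFlow 1.x and that pairwise_distance is defined as shown; the feature values are made up):

import tensorflow as tf  # assumed TensorFlow 1.x

feature = tf.constant([[0.0, 0.0],
                       [3.0, 4.0],
                       [6.0, 8.0]])

# The less_equal mask above adds 1e-16 under the sqrt for exact-zero entries
# (keeping the gradient finite) and zeroes those entries out again afterwards.
dist = pairwise_distance(feature, squared=False)

with tf.Session() as sess:
  print(sess.run(dist))
  # approximately [[ 0.,  5., 10.],
  #                [ 5.,  0.,  5.],
  #                [10.,  5.,  0.]]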
Code Example #42
def pairwise_distance(feature, squared=False, normalized=True):
    """from the source code of `tf.contrib.losses.metric_learning.triplet_semihard_loss`
    Computes the pairwise distance matrix with numerical stability.
    output[i, j] = || feature[i, :] - feature[j, :] ||_2
    Args:
      feature: 2-D Tensor of size [number of data, feature dimension].
      squared: Boolean, whether or not to square the pairwise distances.
      normalized: Boolean, whether or not input feature has been l2 normalized.
    Returns:
      pairwise_distances: 2-D Tensor of size [number of data, number of data].
    """
    if normalized:
        pairwise_distances_squared = 2.0 * (
            1.0 - math_ops.matmul(feature, array_ops.transpose(feature)))
    else:
        pairwise_distances_squared = math_ops.add(
            math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
            math_ops.reduce_sum(math_ops.square(array_ops.transpose(feature)), axis=[0], keepdims=True))\
            - 2.0 * math_ops.matmul(feature, array_ops.transpose(feature))

    # Deal with numerical inaccuracies. Set small negatives to zero.
    pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared,
                                                  0.0)

    # Optionally take the sqrt.
    if squared:
        pairwise_distances = pairwise_distances_squared
    else:
        # Get the mask where the zero distances are at.
        error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
        pairwise_distances = math_ops.sqrt(
            pairwise_distances_squared +
            math_ops.cast(error_mask, dtypes.float32) * 1e-16)
        # Undo conditionally adding 1e-16.
        pairwise_distances = math_ops.multiply(
            pairwise_distances,
            math_ops.cast(math_ops.logical_not(error_mask), dtypes.float32))

    num_data = array_ops.shape(feature)[0]
    # Explicitly set diagonals to zero.
    mask_offdiagonals = array_ops.ones_like(
        pairwise_distances) - array_ops.diag(array_ops.ones([num_data]))
    pairwise_distances = math_ops.multiply(pairwise_distances,
                                           mask_offdiagonals)
    return pairwise_distances
Code Example #43
    def set_up_train(self, pretrain=False):
        self.logger.info("Model setting up train starts")

        decay_func = DECAY_DICT[self.args.dtype]
        if hasattr(self, 'start_epoch'):
            self.logger.info("Current start epoch : {}".format(
                self.start_epoch))
            DECAY_PARAMS_DICT[self.args.hdtype][self.args.nbatch][
                self.args.
                hdptype]['initial_step'] = self.nbatch_train * self.start_epoch
        self.lr, update_step_op = decay_func(**DECAY_PARAMS_DICT[
            self.args.dtype][self.args.nbatch][self.args.dptype])

        print(vars_info_vl(tf.trainable_variables()))
        update_ops = tf.get_collection("update_ops")

        with tf.control_dependencies(update_ops + [update_step_op]):
            self.train_op = get_multi_train_op(tf.train.AdamOptimizer,
                                               self.loss, [self.lr],
                                               [tf.trainable_variables()])

        self.graph_ops_dict = {
            'train': [self.train_op, self.loss],
            'val': self.loss,
            'test': self.loss
        }
        self.val_embed_tensor1 = tf.placeholder(
            tf.float32, shape=[self.args.nbatch, self.args.m])
        self.val_embed_tensor2 = tf.placeholder(tf.float32,
                                                shape=[self.nval, self.args.m])

        # [batch_size, 1] + [1, ndata] - [batch_size, ndata] -> [batch_size, ndata]
        self.p_dist = (math_ops.add(
            math_ops.reduce_sum(math_ops.square(self.val_embed_tensor1),
                                axis=[1], keep_dims=True),
            math_ops.reduce_sum(math_ops.square(
                array_ops.transpose(self.val_embed_tensor2)),
                axis=[0], keep_dims=True)) -
            2.0 * math_ops.matmul(self.val_embed_tensor1,
                                  array_ops.transpose(self.val_embed_tensor2)))

        self.p_dist = math_ops.maximum(self.p_dist, 0.0)  # [batch_size, ndata]
        self.p_dist = math_ops.multiply(
            self.p_dist,
            math_ops.to_float(
                math_ops.logical_not(math_ops.less_equal(self.p_dist, 0.0))))
        self.p_max_idx = tf.nn.top_k(
            -self.p_dist, k=2)[1]  # [batch_size, 2] # get smallest 2

        self.logger.info("Model setting up train ends")
Code Example #44
File: check_ops.py Project: Jackiefan/tensorflow
def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x <= y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_less_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] <= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x:  Numeric `Tensor`.
    y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).  Defaults to "assert_less_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x <= y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_less_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.executing_eagerly():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name

    if data is None:
      data = [
          message,
          'Condition x <= y did not hold element-wise:'
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
Code Example #45
def assert_close(x,
                 y,
                 data=None,
                 summarize=None,
                 message=None,
                 name="assert_close"):
    """Assert that x and y are within machine epsilon of each other.

  Args:
    x: Numeric `Tensor`
    y: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
    message = message or ""
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")

    if data is None:
        data = [
            message, "Condition x ~= y did not hold element-wise: x = ",
            x.name, x, "y = ", y.name, y
        ]

    if x.dtype.is_integer:
        return check_ops.assert_equal(x,
                                      y,
                                      data=data,
                                      summarize=summarize,
                                      message=message,
                                      name=name)

    with ops.name_scope(name, "assert_close", [x, y, data]):
        tol = np.finfo(x.dtype.as_numpy_dtype).eps
        condition = math_ops.reduce_all(
            math_ops.less_equal(math_ops.abs(x - y), tol))
        return control_flow_ops.Assert(condition, data, summarize=summarize)
Code Example #46
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
                                            labels):
    """Creates examples and variables dictionaries for dense features.

  Variables shapes are inferred from the list of dense feature values passed as
  argument.

  Args:
    dense_features_values: The values of the dense features
    weights: The example weights.
    labels: The example labels.
  Returns:
    One dictionary for the examples and one for the variables.
  """
    dense_tensors = []
    dense_weights = []
    for dense_feature in dense_features_values:
        dense_tensor = ops.convert_to_tensor(dense_feature,
                                             dtype=dtypes.float32)
        check_shape_op = control_flow_ops.Assert(
            math_ops.less_equal(array_ops.rank(dense_tensor), 2), [
                'dense_tensor shape must be [batch_size, dimension] or [batch_size]'
            ])
        # Reshape to [batch_size, dense_column_dimension].
        with ops.control_dependencies([check_shape_op]):
            dense_tensor = array_ops.reshape(
                dense_tensor, [dense_tensor.get_shape().as_list()[0], -1])
        dense_tensors.append(dense_tensor)
        # Add variables of shape [feature_column_dimension].
        dense_weights.append(
            variables_lib.Variable(
                array_ops.zeros([dense_tensor.get_shape().as_list()[1]],
                                dtype=dtypes.float32)))

    examples_dict = dict(sparse_features=[],
                         dense_features=dense_tensors,
                         example_weights=weights,
                         example_labels=labels,
                         example_ids=['%d' % i for i in range(0, len(labels))])
    variables_dict = dict(sparse_features_weights=[],
                          dense_features_weights=dense_weights)

    return examples_dict, variables_dict
Code Example #47
    def _multi_seq_fn():
        """Forward computation of alpha values."""
        rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

        # Compute the alpha values in the forward algorithm in order to get the
        # partition function.
        forward_cell = CrfForwardRnnCell(transition_params)
        # Sequence length is not allowed to be less than zero.
        sequence_lengths_less_one = math_ops.maximum(0, sequence_lengths - 1)
        _, alphas = rnn.dynamic_rnn(cell=forward_cell,
                                    inputs=rest_of_input,
                                    sequence_length=sequence_lengths_less_one,
                                    initial_state=first_input,
                                    dtype=dtypes.float32)
        log_norm = math_ops.reduce_logsumexp(alphas, [1])
        # Mask `log_norm` of the sequences with length <= zero.
        log_norm = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                                   array_ops.zeros_like(log_norm), log_norm)
        return log_norm
Code Example #48
def get_forwards_probabilities(inputs, sequence_lengths, transitions):
    '''
    CRF forward probabilities and log normalizer.

    inputs: bs x L x V unaries
    sequence_lengths: bs
    transitions: An object implementing CRF transitions

    returns: bs x L and bs
    '''

    # Split up the first and rest of the inputs in preparation for the forward
    # algorithm.
    first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
    first_input = array_ops.squeeze(first_input, [1])
    """Forward computation of alpha values."""
    unary = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

    pairwise = transitions.pack_to_parameter_sequence()
    pairwise = pairwise[:, 1:, :]

    rnn_inputs = array_ops.concat([unary, pairwise], axis=2)

    # Compute the alpha values in the forward algorithm in order to get the
    # partition function.
    forward_cell = CrfForwardRnnCell(transitions)
    # Sequence length is not allowed to be less than zero.
    sequence_lengths_less_one = math_ops.maximum(
        constant_op.constant(0, dtype=sequence_lengths.dtype),
        sequence_lengths - 1)
    all_alphas, alphas = rnn.dynamic_rnn(
        cell=forward_cell,
        inputs=rnn_inputs,
        sequence_length=sequence_lengths_less_one,
        initial_state=first_input,
        dtype=dtypes.float32)

    log_norm = math_ops.reduce_logsumexp(alphas, [1])
    # Mask `log_norm` of the sequences with length <= zero.
    log_norm = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                               array_ops.zeros_like(log_norm), log_norm)

    return all_alphas, log_norm
Code Example #49
File: sdca_ops_test.py Project: Immexxx/tensorflow
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
                                            labels):
  """Creates examples and variables dictionaries for dense features.

  Variables shapes are inferred from the list of dense feature values passed as
  argument.

  Args:
    dense_features_values: The values of the dense features
    weights: The example weights.
    labels: The example labels.
  Returns:
    One dictionary for the examples and one for the variables.
  """
  dense_tensors = []
  dense_weights = []
  for dense_feature in dense_features_values:
    dense_tensor = ops.convert_to_tensor(dense_feature, dtype=dtypes.float32)
    check_shape_op = control_flow_ops.Assert(
        math_ops.less_equal(array_ops.rank(dense_tensor), 2),
        ['dense_tensor shape must be [batch_size, dimension] or [batch_size]'])
    # Reshape to [batch_size, dense_column_dimension].
    with ops.control_dependencies([check_shape_op]):
      dense_tensor = array_ops.reshape(
          dense_tensor, [dense_tensor.get_shape().as_list()[0], -1])
    dense_tensors.append(dense_tensor)
    # Add variables of shape [feature_column_dimension].
    dense_weights.append(
        variables_lib.Variable(
            array_ops.zeros(
                [dense_tensor.get_shape().as_list()[1]], dtype=dtypes.float32)))

  examples_dict = dict(
      sparse_features=[],
      dense_features=dense_tensors,
      example_weights=weights,
      example_labels=labels,
      example_ids=['%d' % i for i in range(0, len(labels))])
  variables_dict = dict(
      sparse_features_weights=[], dense_features_weights=dense_weights)

  return examples_dict, variables_dict
Code Example #50
File: dirichlet.py Project: zsgchinese/tensorflow
def _assert_close(x, y, data=None, summarize=None, name=None):
    if x.dtype.is_integer:
        return check_ops.assert_equal(x,
                                      y,
                                      data=data,
                                      summarize=summarize,
                                      name=name)

    with ops.op_scope([x, y, data], name, "assert_close"):
        x = ops.convert_to_tensor(x, name="x")
        y = ops.convert_to_tensor(y, name="y")
        tol = np.finfo(x.dtype.as_numpy_dtype).resolution
        if data is None:
            data = [
                "Condition x ~= y did not hold element-wise: x = ", x.name, x,
                "y = ", y.name, y
            ]
        condition = math_ops.reduce_all(
            math_ops.less_equal(math_ops.abs(x - y), tol))
        return logging_ops.Assert(condition, data, summarize=summarize)
Code Example #51
def assert_less_equal(x, y, data=None, summarize=None, name=None):
    """Assert the condition `x <= y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_less_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  Example of adding dependency to the tensor being checked:

  ```python
  x = tf.with_dependencies([tf.assert_less_equal(x, y)], x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] <= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x:  Numeric `Tensor`.
    y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional).  Defaults to "assert_less_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x <= y` is False.
  """
    with ops.op_scope([x, y, data], name, 'assert_less_equal'):
        x = ops.convert_to_tensor(x, name='x')
        y = ops.convert_to_tensor(y, name='y')
        if data is None:
            data = [
                'Condition x <= y did not hold element-wise: x = ', x.name, x,
                'y = ', y.name, y
            ]
        condition = math_ops.reduce_all(math_ops.less_equal(x, y))
        return logging_ops.Assert(condition, data, summarize=summarize)
Code Example #52
File: check_ops.py Project: 10imaging/tensorflow
def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
    """Assert the condition `x <= y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_less_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  Example of adding dependency to the tensor being checked:

  ```python
  x = tf.with_dependencies([tf.assert_less_equal(x, y)], x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] <= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x:  Numeric `Tensor`.
    y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).  Defaults to "assert_less_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x <= y` is False.
  """
    message = message or ""
    with ops.op_scope([x, y, data], name, "assert_less_equal"):
        x = ops.convert_to_tensor(x, name="x")
        y = ops.convert_to_tensor(y, name="y")
        if data is None:
            data = [message, "Condition x <= y did not hold element-wise: x = ", x.name, x, "y = ", y.name, y]
        condition = math_ops.reduce_all(math_ops.less_equal(x, y))
        return logging_ops.Assert(condition, data, summarize=summarize)
Code Example #53
    def __call__(self, step):
        with ops.name_scope_v2(self.name or "CosineDecayWithWarmup"):
            initial_learning_rate = ops.convert_to_tensor(
                self.initial_learning_rate, name="initial_learning_rate")
            max_learning_rate = ops.convert_to_tensor(
                self.max_learning_rate, name="max_learning_rate")
            dtype = initial_learning_rate.dtype
            decay_steps = math_ops.cast(self.decay_steps, dtype)
            warmup_steps = math_ops.cast(self.warmup_steps, dtype)
            total_steps = decay_steps + warmup_steps

            global_step_recomp = math_ops.cast(step, dtype)
            global_step_recomp = math_ops.minimum(global_step_recomp,
                                                  total_steps)

            return control_flow_ops.cond(
                math_ops.less_equal(global_step_recomp, warmup_steps),
                lambda: self.lr_warmup(global_step_recomp, warmup_steps,
                                       max_learning_rate, initial_learning_rate),
                lambda: self.cosine_decay(global_step_recomp, warmup_steps,
                                          decay_steps, max_learning_rate,
                                          self.alpha))
Code Example #54
File: crf.py Project: Jordan1237/tensorflow
  def _multi_seq_fn():
    """Forward computation of alpha values."""
    rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

    # Compute the alpha values in the forward algorithm in order to get the
    # partition function.
    forward_cell = CrfForwardRnnCell(transition_params)
    # Sequence length is not allowed to be less than zero.
    sequence_lengths_less_one = math_ops.maximum(
        constant_op.constant(0, dtype=sequence_lengths.dtype),
        sequence_lengths - 1)
    _, alphas = rnn.dynamic_rnn(
        cell=forward_cell,
        inputs=rest_of_input,
        sequence_length=sequence_lengths_less_one,
        initial_state=first_input,
        dtype=dtypes.float32)
    log_norm = math_ops.reduce_logsumexp(alphas, [1])
    # Mask `log_norm` of the sequences with length <= zero.
    log_norm = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                               array_ops.zeros_like(log_norm),
                               log_norm)
    return log_norm
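
For reference, the recurrence that `CrfForwardRnnCell` is presumably unrolling is the standard CRF forward algorithm in log space: alpha_t[j] = logsumexp_i(alpha_{t-1}[i] + transitions[i, j]) + inputs[t, j], with log Z = logsumexp_j(alpha_T[j]). A minimal NumPy sketch for a single sequence; names here are illustrative, not the library's API.

```python
import numpy as np
from scipy.special import logsumexp

def crf_log_norm(inputs, transitions):
  """Log partition function for one sequence of unary scores."""
  # inputs: [seq_len, num_tags], transitions: [num_tags, num_tags].
  alphas = inputs[0]
  for t in range(1, inputs.shape[0]):
    alphas = logsumexp(alphas[:, None] + transitions, axis=0) + inputs[t]
  return logsumexp(alphas)

# With zero transitions and unary scores that are log-probabilities, the sum
# over all paths is 1, so log Z should be ~0.
unaries = np.log(np.array([[0.6, 0.4], [0.3, 0.7]]))
print(crf_log_norm(unaries, np.zeros((2, 2))))  # ~0.0
```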
コード例 #55
ファイル: check_ops.py プロジェクト: RChandrasekar/tensorflow
def is_non_decreasing(x, name=None):
    """Returns `True` if `x` is non-decreasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.
  If `x` has less than two elements, it is trivially non-decreasing.

  See also:  `is_strictly_increasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).  Defaults to "is_non_decreasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.op_scope([x], name, "is_non_decreasing"):
    diff = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, diff = [], less_equal = [], and reduce_all([]) = True.
    zero = ops.convert_to_tensor(0, dtype=diff.dtype)
    return math_ops.reduce_all(math_ops.less_equal(zero, diff))
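
A quick usage sketch, assuming the public `tf.is_non_decreasing` wrapper around this check and a TF 1.x graph-mode session, matching the era of these snippets:

```python
import tensorflow as tf

with tf.Session() as sess:
  print(sess.run(tf.is_non_decreasing([1, 2, 2, 5])))  # True: every adjacent diff >= 0
  print(sess.run(tf.is_non_decreasing([3, 1, 4])))     # False: 1 < 3
  print(sess.run(tf.is_non_decreasing([7])))           # True: fewer than two elements
```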
コード例 #56
ファイル: tensor_node.py プロジェクト: keveman/tensorflow
 def __le__(self, other):
   return math_ops.less_equal(self, other)
コード例 #57
def kernel_classifier_distance_and_std_from_activations(real_activations,
                                                        generated_activations,
                                                        max_block_size=1024,
                                                        dtype=None):
  """Kernel "classifier" distance for evaluating a generative model.

  This method computes the kernel classifier distance from activations of
  real images and generated images. This can be used independently of the
  kernel_classifier_distance() method, especially in the case of using large
  batches during evaluation where we would like to precompute all of the
  activations before computing the classifier distance, or if we want to
  compute multiple metrics based on the same images. It also returns a rough
  estimate of the standard error of the estimator.

  This technique is described in detail in https://arxiv.org/abs/1801.01401.
  Given two distributions P and Q of activations, this function calculates

      E_{X, X' ~ P}[k(X, X')] + E_{Y, Y' ~ Q}[k(Y, Y')]
        - 2 E_{X ~ P, Y ~ Q}[k(X, Y)]

  where k is the polynomial kernel

      k(x, y) = ( x^T y / dimension + 1 )^3.

  This captures how different the distributions of real and generated images'
  visual features are. Like the Frechet distance (and unlike the Inception
  score), this is a true distance and incorporates information about the
  target images. Unlike the Frechet score, this function computes an
  *unbiased* and asymptotically normal estimator, which makes comparing
  estimates across models much more intuitive.

  The estimator used takes time quadratic in max_block_size. Larger values of
  max_block_size will decrease the variance of the estimator but increase the
  computational cost. This differs slightly from the estimator used by the
  original paper; it is the block estimator of https://arxiv.org/abs/1307.1954.
  The estimate of the standard error will also be more reliable when there are
  more blocks, i.e. when max_block_size is smaller.

  NOTE: the blocking code assumes that real_activations and
  generated_activations are both in random order. If either is sorted in a
  meaningful order, the estimator will behave poorly.

  Args:
    real_activations: 2D Tensor containing activations of real data. Shape is
      [batch_size, activation_size].
    generated_activations: 2D Tensor containing activations of generated data.
      Shape is [batch_size, activation_size].
    max_block_size: integer, default 1024. The distance estimator splits samples
      into blocks for computational efficiency. Larger values are more
      computationally expensive but decrease the variance of the distance
      estimate. Having a smaller block size also gives a better estimate of the
      standard error.
    dtype: If not None, coerce activations to this dtype before computations.

  Returns:
   The Kernel Inception Distance. A floating-point scalar of the same type
     as the output of the activations.
   An estimate of the standard error of the distance estimator (a scalar of
     the same type).
  """

  real_activations.shape.assert_has_rank(2)
  generated_activations.shape.assert_has_rank(2)
  real_activations.shape[1].assert_is_compatible_with(
      generated_activations.shape[1])

  if dtype is None:
    dtype = real_activations.dtype
    assert generated_activations.dtype == dtype
  else:
    real_activations = math_ops.cast(real_activations, dtype)
    generated_activations = math_ops.cast(generated_activations, dtype)

  # Figure out how to split the activations into blocks of approximately
  # equal size, with none larger than max_block_size.
  n_r = array_ops.shape(real_activations)[0]
  n_g = array_ops.shape(generated_activations)[0]

  n_bigger = math_ops.maximum(n_r, n_g)
  n_blocks = math_ops.to_int32(math_ops.ceil(n_bigger / max_block_size))

  v_r = n_r // n_blocks
  v_g = n_g // n_blocks

  n_plusone_r = n_r - v_r * n_blocks
  n_plusone_g = n_g - v_g * n_blocks

  sizes_r = array_ops.concat([
      array_ops.fill([n_blocks - n_plusone_r], v_r),
      array_ops.fill([n_plusone_r], v_r + 1),
  ], 0)
  sizes_g = array_ops.concat([
      array_ops.fill([n_blocks - n_plusone_g], v_g),
      array_ops.fill([n_plusone_g], v_g + 1),
  ], 0)

  zero = array_ops.zeros([1], dtype=dtypes.int32)
  inds_r = array_ops.concat([zero, math_ops.cumsum(sizes_r)], 0)
  inds_g = array_ops.concat([zero, math_ops.cumsum(sizes_g)], 0)

  dim = math_ops.cast(real_activations.shape[1], dtype)

  def compute_kid_block(i):
    """Computes the ith block of the KID estimate."""
    r_s = inds_r[i]
    r_e = inds_r[i + 1]
    r = real_activations[r_s:r_e]
    m = math_ops.cast(r_e - r_s, dtype)

    g_s = inds_g[i]
    g_e = inds_g[i + 1]
    g = generated_activations[g_s:g_e]
    n = math_ops.cast(g_e - g_s, dtype)

    k_rr = (math_ops.matmul(r, r, transpose_b=True) / dim + 1)**3
    k_rg = (math_ops.matmul(r, g, transpose_b=True) / dim + 1)**3
    k_gg = (math_ops.matmul(g, g, transpose_b=True) / dim + 1)**3
    return (-2 * math_ops.reduce_mean(k_rg) +
            (math_ops.reduce_sum(k_rr) - math_ops.trace(k_rr)) / (m * (m - 1)) +
            (math_ops.reduce_sum(k_gg) - math_ops.trace(k_gg)) / (n * (n - 1)))

  ests = map_fn.map_fn(
      compute_kid_block, math_ops.range(n_blocks), dtype=dtype, back_prop=False)

  mn = math_ops.reduce_mean(ests)

  # nn_impl.moments doesn't use the Bessel correction, which we want here
  n_blocks_ = math_ops.cast(n_blocks, dtype)
  var = control_flow_ops.cond(
      math_ops.less_equal(n_blocks, 1),
      lambda: array_ops.constant(float('nan'), dtype=dtype),
      lambda: math_ops.reduce_sum(math_ops.square(ests - mn)) / (n_blocks_ - 1))

  return mn, math_ops.sqrt(var / n_blocks_)
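
The per-block computation above is the unbiased MMD^2 estimate under the polynomial kernel k(x, y) = (x^T y / dim + 1)^3. A minimal NumPy sketch of a single block, with illustrative names that are not part of the library:

```python
import numpy as np

def kid_block(r, g):
  # r: [m, dim] real activations, g: [n, dim] generated activations.
  dim = r.shape[1]
  m, n = len(r), len(g)
  k_rr = (r @ r.T / dim + 1) ** 3
  k_rg = (r @ g.T / dim + 1) ** 3
  k_gg = (g @ g.T / dim + 1) ** 3
  # Diagonals are excluded from the within-set sums to keep the estimate unbiased.
  return ((k_rr.sum() - np.trace(k_rr)) / (m * (m - 1))
          + (k_gg.sum() - np.trace(k_gg)) / (n * (n - 1))
          - 2 * k_rg.mean())

rng = np.random.RandomState(0)
real = rng.randn(64, 16)
fake = rng.randn(64, 16) + 0.5  # shifted distribution -> positive distance
print(kid_block(real, fake))
```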
コード例 #58
 def MyFn(x):
   with ops.control_dependencies(
       [control_flow_ops.Assert(math_ops.less_equal(x, 10.0), [x])]):
     return array_ops.identity(x)
コード例 #59
ファイル: bucket_ops.py プロジェクト: ComeOnGetMe/tensorflow
def bucket_by_sequence_length(input_length,
                              tensors,
                              batch_size,
                              bucket_boundaries,
                              num_threads=1,
                              capacity=32,
                              shapes=None,
                              dynamic_pad=False,
                              allow_smaller_final_batch=False,
                              keep_input=None,
                              shared_name=None,
                              name=None):
  """Lazy bucketing of inputs according to their length.

  This method calls `tf.contrib.training.bucket` under the hood, after first
  subdividing the bucket boundaries into separate buckets and identifying which
  bucket the given `input_length` belongs to.  See the documentation for
  `bucket` for details of the other arguments.

  Args:
    input_length: `int32` scalar `Tensor`, the sequence length of tensors.
    tensors: The list or dictionary of tensors, representing a single element,
      to bucket.  Nested lists are not supported.
    batch_size: The new batch size pulled from the queue
      (python int or int32 scalar).
    bucket_boundaries: int list, increasing non-negative numbers.
      The edges of the buckets to use when bucketing tensors.  Two extra buckets
      are created, one for `input_length < bucket_boundaries[0]` and
      one for `input_length >= bucket_boundaries[-1]`.
    num_threads: An integer.  The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of minibatches in the top queue,
      and also the maximum number of elements within each bucket.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean.  Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
      batches to be smaller if there are insufficient items left in the queues.
    keep_input: (Optional).  A `bool` scalar Tensor.  If provided, this tensor
      controls whether the input is added to the queue or not.  If it evaluates
      `True`, then `tensors` are added to the bucket; otherwise they are
      dropped.  This tensor essentially acts as a filtering mechanism.
      The default behavior is to assume `keep_input=True`.
    shared_name: (Optional). If set, the queues will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A tuple `(sequence_length, outputs)` where `sequence_length` is
    a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
    of batched, bucketed, outputs corresponding to elements of `tensors`.

  Raises:
    TypeError: if `bucket_boundaries` is not a list of python integers.
    ValueError: if `bucket_boundaries` is empty or contains non-increasing
      values.
  """
  tensor_list = _as_tensor_list(tensors)
  if not isinstance(bucket_boundaries, (list, tuple)):
    raise TypeError(
        "bucket_boundaries must be a list or tuple, but received: %s"
        % bucket_boundaries)
  if not bucket_boundaries:
    raise ValueError("bucket_boundaries must not be empty")
  for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):
    if not isinstance(s, int) or not isinstance(e, int):
      raise TypeError(
          "bucket boundaries must be integers, but saw: %s and %s" % (s, e))
    if s >= e:
      raise ValueError(
          "Buckets must contain sequential increasing lengths, but saw: "
          "%d before %d" % (s, e))

  with ops.name_scope(name, "bucket_by_sequence_length",
                      [input_length] + tensor_list) as name:
    input_length = ops.convert_to_tensor(
        input_length, dtype=dtypes.int32, name="input_length")
    # Bucketing conditions are:
    #   l < b[0]
    #   b[0] <= l < b[1]
    #   b[1] <= l < b[2]
    #   ...
    #   b[N-2] <= l < b[N-1]
    #   b[N-1] <= l
    # Equivalent to:
    #   [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]
    buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)
    buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]
    conditions_c = math_ops.logical_and(
        math_ops.less_equal(buckets_min, input_length),
        math_ops.less(input_length, buckets_max))
    which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))
    which_bucket = math_ops.to_int32(which_bucket)

    if shapes is not None:
      shapes = [tensor_shape.scalar()] + shapes

    _, dequeued = bucket(
        tensors=[input_length] + tensor_list,
        which_bucket=which_bucket,
        batch_size=batch_size,
        num_buckets=len(bucket_boundaries) + 1,
        num_threads=num_threads,
        capacity=capacity,
        shapes=shapes,
        dynamic_pad=dynamic_pad,
        allow_smaller_final_batch=allow_smaller_final_batch,
        keep_input=keep_input,
        shared_name=shared_name)

    return (dequeued[0], _as_original_type(tensors, dequeued[1:]))
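
The bucket-selection logic above reduces to padding the boundaries with min/max sentinels and picking the half-open interval that contains `input_length`. A minimal sketch in plain Python, illustrative only:

```python
import numpy as np

def which_bucket(input_length, bucket_boundaries):
  # Buckets: [-inf, b0), [b0, b1), ..., [b_{N-1}, +inf).
  mins = [np.iinfo(np.int32).min] + list(bucket_boundaries)
  maxs = list(bucket_boundaries) + [np.iinfo(np.int32).max]
  conditions = [lo <= input_length < hi for lo, hi in zip(mins, maxs)]
  return conditions.index(True)  # exactly one interval matches

print(which_bucket(3, [5, 10, 20]))   # 0: shorter than the first boundary
print(which_bucket(7, [5, 10, 20]))   # 1: 5 <= 7 < 10
print(which_bucket(25, [5, 10, 20]))  # 3: at or beyond the last boundary
```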
コード例 #60
ファイル: math_utils.py プロジェクト: AutumnQYN/tensorflow
 def _update_statistics_from_mini_batch(
     self, statistics, auxiliary_variables, times, values):
   """Given mini-batch input, update `statistics` and `auxiliary_variables`."""
   values = math_ops.cast(values, self._dtype)
   # The density (measured in times per observation) that we see in each part
   # of the mini-batch.
   batch_inter_observation_duration = (math_ops.cast(
       math_ops.reduce_max(times, axis=1) - math_ops.reduce_min(times, axis=1),
       self._dtype) / math_ops.cast(
           array_ops.shape(times)[1] - 1, self._dtype))
   # Co-locate updates with their variables to minimize race conditions when
   # updating statistics.
   with ops.colocate_with(auxiliary_variables.max_time_seen):
     # There is a race condition if this value is being updated from multiple
     # workers. However, it should eventually reach the correct value if the
     # last chunk is presented enough times.
     max_time_seen_assign = state_ops.assign(
         auxiliary_variables.max_time_seen,
         gen_math_ops.maximum(auxiliary_variables.max_time_seen,
                              math_ops.reduce_max(times)))
   with ops.colocate_with(auxiliary_variables.chunk_count):
     chunk_count_assign = state_ops.assign_add(auxiliary_variables.chunk_count,
                                               array_ops.shape(
                                                   times,
                                                   out_type=dtypes.int64)[0])
   with ops.colocate_with(auxiliary_variables.inter_observation_duration_sum):
     inter_observation_duration_assign = state_ops.assign_add(
         auxiliary_variables.inter_observation_duration_sum,
         math_ops.reduce_sum(batch_inter_observation_duration))
   with ops.colocate_with(auxiliary_variables.example_count):
     example_count_assign = state_ops.assign_add(
         auxiliary_variables.example_count,
         array_ops.size(times, out_type=dtypes.int64))
   # Note: These mean/variance updates assume that all points are equally
   # likely, which is not true if _chunks_ are sampled uniformly from the space
   # of all possible contiguous chunks, since points at the start and end of
   # the series are then members of fewer chunks. For series which are much
   # longer than the chunk size (the usual/expected case), this effect becomes
   # irrelevant.
   with ops.colocate_with(auxiliary_variables.overall_feature_sum):
     overall_feature_sum_assign = state_ops.assign_add(
         auxiliary_variables.overall_feature_sum,
         math_ops.reduce_sum(values, axis=[0, 1]))
   with ops.colocate_with(auxiliary_variables.overall_feature_sum_of_squares):
     overall_feature_sum_of_squares_assign = state_ops.assign_add(
         auxiliary_variables.overall_feature_sum_of_squares,
         math_ops.reduce_sum(values**2, axis=[0, 1]))
   per_chunk_aux_updates = control_flow_ops.group(
       max_time_seen_assign, chunk_count_assign,
       inter_observation_duration_assign, example_count_assign,
       overall_feature_sum_assign, overall_feature_sum_of_squares_assign)
   with ops.control_dependencies([per_chunk_aux_updates]):
     example_count_float = math_ops.cast(auxiliary_variables.example_count,
                                         self._dtype)
     new_feature_mean = (auxiliary_variables.overall_feature_sum /
                         example_count_float)
     overall_feature_mean_update = state_ops.assign(
         statistics.overall_feature_moments.mean, new_feature_mean)
     overall_feature_var_update = state_ops.assign(
         statistics.overall_feature_moments.variance,
         # De-biased n / (n - 1) variance correction
         example_count_float / (example_count_float - 1.) *
         (auxiliary_variables.overall_feature_sum_of_squares /
          example_count_float - new_feature_mean**2))
     # TODO(b/35675805): Remove this cast
     min_time_batch = math_ops.cast(math_ops.argmin(times[:, 0]), dtypes.int32)
     def series_start_updates():
       # If this is the lowest-time chunk that we have seen so far, update
       # series start moments to reflect that. Note that these statistics are
       # "best effort", as there are race conditions in the update (however,
       # they should eventually converge if the start of the series is
       # presented enough times).
       mean, variance = nn.moments(
           values[min_time_batch, :self._starting_variance_window_size],
           axes=[0])
       return control_flow_ops.group(
           state_ops.assign(statistics.series_start_moments.mean, mean),
           state_ops.assign(statistics.series_start_moments.variance,
                            variance))
     with ops.colocate_with(statistics.start_time):
       series_start_update = control_flow_ops.cond(
           # Update moments whenever we even match the lowest time seen so far,
           # to ensure that series start statistics are eventually updated to
            # their correct values, despite race conditions (i.e. eventually
            # statistics.start_time will reflect the global lowest time, and
            # given that, we will eventually update the series start moments
            # to their correct values).
           math_ops.less_equal(times[min_time_batch, 0],
                               statistics.start_time),
           series_start_updates,
           control_flow_ops.no_op)
       with ops.control_dependencies([series_start_update]):
         # There is a race condition if this update is performed in parallel on
         # multiple workers. Since models may be sensitive to being presented
         # with times before the putative start time, the value of this
         # variable is post-processed above to guarantee that each worker is
         # presented with a start time which is at least as low as the lowest
         # time in its current mini-batch.
         start_time_update = state_ops.assign(statistics.start_time,
                                              gen_math_ops.minimum(
                                                  statistics.start_time,
                                                  math_ops.reduce_min(times)))
     inter_observation_duration_estimate = (
         auxiliary_variables.inter_observation_duration_sum / math_ops.cast(
             auxiliary_variables.chunk_count, self._dtype))
     # Estimate the total number of observations as:
     #   (end time - start time + 1) * average intra-chunk time density
     total_observation_count_update = state_ops.assign(
         statistics.total_observation_count,
         math_ops.cast(
             gen_math_ops.round(
                 math_ops.cast(auxiliary_variables.max_time_seen -
                               statistics.start_time + 1, self._dtype) /
                 inter_observation_duration_estimate), dtypes.int64))
     per_chunk_stat_updates = control_flow_ops.group(
         overall_feature_mean_update, overall_feature_var_update,
         series_start_update, start_time_update,
         total_observation_count_update)
   return per_chunk_stat_updates
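
The overall-moments update above boils down to maintaining a running sum and sum of squares, then forming the mean and the n/(n-1) de-biased variance. A minimal NumPy sketch of just that piece, ignoring the per-chunk and series-start bookkeeping; names are illustrative:

```python
import numpy as np

def update_overall_moments(feature_sum, feature_sum_sq, count, values):
  # values: [batch, window, num_features] mini-batch, as in the method above.
  feature_sum = feature_sum + values.sum(axis=(0, 1))
  feature_sum_sq = feature_sum_sq + (values ** 2).sum(axis=(0, 1))
  count = count + values.shape[0] * values.shape[1]
  mean = feature_sum / count
  # De-biased n / (n - 1) variance correction, matching the assign above.
  variance = count / (count - 1.0) * (feature_sum_sq / count - mean ** 2)
  return feature_sum, feature_sum_sq, count, mean, variance

values = np.arange(24, dtype=np.float64).reshape(3, 4, 2)
state = (np.zeros(2), np.zeros(2), 0)
_, _, _, mean, variance = update_overall_moments(*state, values)
print(mean, variance)
```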