Example 1
 def _loss_fn(labels, logits):
   check_labels = control_flow_ops.Assert(
       math_ops.reduce_all(math_ops.equal(labels, labels_input)),
       data=[labels])
   check_logits = control_flow_ops.Assert(
       math_ops.reduce_all(math_ops.equal(logits, logits_input)),
       data=[logits])
   with ops.control_dependencies([check_labels, check_logits]):
     return constant_op.constant(loss)
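The pattern above, an element-wise equality collapsed with reduce_all and wrapped in an Assert that gates the returned value through control_dependencies, is easy to reproduce with the public TensorFlow API. The sketch below is illustrative only; checked_identity and its arguments are made-up names, not part of the example.

import tensorflow as tf

def checked_identity(x, expected):
  # tf.debugging.Assert raises InvalidArgumentError when the condition is False.
  check = tf.debugging.Assert(
      tf.reduce_all(tf.equal(x, expected)),
      data=["x did not match the expected value:", x])
  with tf.control_dependencies([check]):
    return tf.identity(x)

print(checked_identity(tf.constant([1, 2, 3]), tf.constant([1, 2, 3])))
# Passing tf.constant([1, 2, 4]) instead would raise InvalidArgumentError.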
Example 2
def _compute_energy_change(current_target_log_prob,
                           current_momentums,
                           proposed_target_log_prob,
                           proposed_momentums,
                           independent_chain_ndims,
                           name=None):
  """Helper to `kernel` which computes the energy change."""
  with ops.name_scope(
      name, "compute_energy_change",
      ([current_target_log_prob, proposed_target_log_prob,
        independent_chain_ndims] +
       current_momentums + proposed_momentums)):
    # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy
    # since they're a mouthful and lets us inline more.
    lk0, lk1 = [], []
    for current_momentum, proposed_momentum in zip(current_momentums,
                                                   proposed_momentums):
      axis = math_ops.range(independent_chain_ndims,
                            array_ops.rank(current_momentum))
      lk0.append(_log_sum_sq(current_momentum, axis))
      lk1.append(_log_sum_sq(proposed_momentum, axis))

    lk0 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk0, axis=-1),
                                                  axis=-1)
    lk1 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk1, axis=-1),
                                                  axis=-1)
    lp0 = -current_target_log_prob   # log_potential
    lp1 = -proposed_target_log_prob  # proposed_log_potential
    x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],
                        axis=-1)

    # The sum is NaN if any element is NaN or we see both +Inf and -Inf.
    # Thus we will replace such rows with infinite energy change which implies
    # rejection. Recall that float-comparisons with NaN are always False.
    is_sum_determinate = (
        math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &
        math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))
    is_sum_determinate = array_ops.tile(
        is_sum_determinate[..., array_ops.newaxis],
        multiples=array_ops.concat([
            array_ops.ones(array_ops.rank(is_sum_determinate),
                           dtype=dtypes.int32),
            [4],
        ], axis=0))
    x = array_ops.where(is_sum_determinate,
                        x,
                        array_ops.fill(array_ops.shape(x),
                                       value=x.dtype.as_numpy_dtype(np.inf)))

    return math_ops.reduce_sum(x, axis=-1)
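The "determinate sum" test above is worth seeing on concrete values: a row's sum is NaN if the row contains a NaN or mixes +Inf with -Inf, and such rows are replaced with +Inf energy change so they are rejected. A minimal eager-mode sketch, with values chosen purely for illustration:

import numpy as np
import tensorflow as tf

x = tf.constant([[1., 2., 3., 4.],           # finite row: sum is well defined
                 [np.inf, 1., -np.inf, 0.],  # +Inf and -Inf together: sum is NaN
                 [np.nan, 1., 2., 3.]])      # NaN present: sum is NaN
is_sum_determinate = (
    tf.reduce_all(tf.math.is_finite(x) | (x >= 0.), axis=-1) &
    tf.reduce_all(tf.math.is_finite(x) | (x <= 0.), axis=-1))
print(is_sum_determinate.numpy())  # [ True False False]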
Example 3
def same_dynamic_shape(a, b):
  """Returns whether a and b have the same dynamic shape.

  Args:
    a: `Tensor`
    b: `Tensor`

  Returns:
    `Boolean` `Tensor` representing if both tensors have the same shape.
  """
  a = ops.convert_to_tensor(a, name="a")
  b = ops.convert_to_tensor(b, name="b")

  # One of the shapes isn't fully defined, so we need to use the dynamic
  # shape.
  return control_flow_ops.cond(
      math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
      # Here we can't just do math_ops.equal(a.shape, b.shape), since
      # static shape inference may break the equality comparison between
      # shape(a) and shape(b) in math_ops.equal.
      lambda: math_ops.reduce_all(math_ops.equal(
          array_ops.concat_v2((
              array_ops.shape(a),
              array_ops.shape(b)), 0),
          array_ops.concat_v2((
              array_ops.shape(b),
              array_ops.shape(a)), 0))),
      lambda: constant_op.constant(False))
Example 4
  def testNonSequenceNestedStructure(self):
    components = np.array([1, 2, 3], dtype=np.int64)

    dataset = dataset_ops.Dataset.from_tensors(components)
    self.assertEqual(dtypes.int64,
                     dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))

    dataset = dataset.filter(
        lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
    self.assertEqual(dtypes.int64,
                     dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))

    dataset = dataset.map(lambda x: array_ops.stack([x, x]))
    self.assertEqual(dtypes.int64,
                     dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual([2, 3], dataset_ops.get_legacy_output_shapes(dataset))

    dataset = dataset.flat_map(
        lambda x: dataset_ops.Dataset.from_tensor_slices(x))
    self.assertEqual(dtypes.int64,
                     dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual([3], dataset_ops.get_legacy_output_shapes(dataset))

    get_next = self.getNext(dataset)
    self.assertEqual(dtypes.int64, get_next().dtype)
    self.assertEqual([3], get_next().shape)
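The filter predicate in this test keeps only elements that are exactly equal, element-wise, to components. A standalone sketch of the same predicate with the public tf.data API; the two-row dataset below is made up for illustration:

import numpy as np
import tensorflow as tf

components = np.array([1, 2, 3], dtype=np.int64)
ds = tf.data.Dataset.from_tensor_slices(
    np.array([[1, 2, 3], [1, 2, 4]], dtype=np.int64))
ds = ds.filter(lambda x: tf.reduce_all(tf.equal(x, components)))
print([e.numpy().tolist() for e in ds])  # [[1, 2, 3]]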
Example 5
 def testUniformSamplePdf(self):
   a = 10.0
   b = [11.0, 100.0]
   uniform = uniform_lib.Uniform(a, b)
   self.assertTrue(
       self.evaluate(
           math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0)))
Example 6
def assert_less_equal(x, y, data=None, summarize=None, name=None):
  """Assert the condition `x <= y` holds element-wise.

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] <= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x:  Numeric `Tensor`.
    y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional).  Defaults to "assert_less_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x <= y` is False.
  """
  with ops.op_scope([x, y, data], name, 'assert_less_equal'):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if data is None:
      data = [
          'Condition x <= y did not hold element-wise: x = ', x.name, x, 'y = ',
          y.name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(x, y))
    return logging_ops.Assert(condition, data, summarize=summarize)
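For comparison, the same check is available today as tf.debugging.assert_less_equal; in eager mode the assertion fires immediately rather than returning an op. A short usage sketch:

import tensorflow as tf

x = tf.constant([1, 2, 3])
y = tf.constant([2, 2, 3])
tf.debugging.assert_less_equal(x, y)   # passes: every x[i] <= y[i]
try:
  tf.debugging.assert_less_equal(y, tf.constant([0, 0, 0]))
except tf.errors.InvalidArgumentError:
  print("assertion failed as expected")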
Example 7
 def testUniformSamplePdf(self):
   with self.test_session():
     a = 10.0
     b = [11.0, 100.0]
     uniform = uniform_lib.Uniform(a, b)
     self.assertTrue(
         math_ops.reduce_all(uniform.pdf(uniform.sample(10)) > 0).eval())
Example 8
def _call_loss_fn(loss_fn, labels, logits, features):
  """Calls loss_fn and checks the returned shape.

  Args:
    loss_fn: The loss function.
    labels: Processed labels Tensor.
    logits: Logits Tensor of shape [batch_size, logits_dimension].
    features: Features dict.
  Returns:
    Loss Tensor with shape [batch_size, 1].
  """
  loss_fn_args = util.fn_args(loss_fn)
  kwargs = {}
  if 'features' in loss_fn_args:
    kwargs['features'] = features
  unweighted_loss = loss_fn(labels=labels, logits=logits, **kwargs)
  batch_size = array_ops.shape(logits)[0]
  loss_shape = array_ops.shape(unweighted_loss)
  check_shape_op = control_flow_ops.Assert(
      math_ops.reduce_all(math_ops.equal(loss_shape, [batch_size, 1])),
      data=[
          'loss_fn must return Tensor of shape [batch_size, 1]. Given: ',
          loss_shape])
  with ops.control_dependencies([check_shape_op]):
    return array_ops.identity(unweighted_loss)
Example 9
def assert_close(
    x, y, data=None, summarize=None, message=None, name="assert_close"):
  """Assert that that x and y are within machine epsilon of each other.

  Args:
    x: Numeric `Tensor`
    y: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
  message = message or ""
  x = ops.convert_to_tensor(x, name="x")
  y = ops.convert_to_tensor(y, name="y")

  if x.dtype.is_integer:
    return check_ops.assert_equal(
        x, y, data=data, summarize=summarize, message=message, name=name)

  with ops.name_scope(name, "assert_close", [x, y, data]):
    tol = np.finfo(x.dtype.as_numpy_dtype).resolution
    if data is None:
      data = [
          message,
          "Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ",
          y.name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x-y), tol))
    return control_flow_ops.Assert(
        condition, data, summarize=summarize)
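The tolerance used here, np.finfo(dtype).resolution, is roughly 1e-6 for float32 and 1e-15 for float64. A small sketch of the underlying comparison; the variable names are illustrative:

import numpy as np
import tensorflow as tf

x = tf.constant([1.0, 2.0], dtype=tf.float32)
y = x + 1e-8                                  # well inside float32 resolution
tol = np.finfo(np.float32).resolution         # 1e-06
print(bool(tf.reduce_all(tf.abs(x - y) <= tol)))  # True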
Example 10
  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))

      def maybe_sample():
        """Perform scheduled sampling."""
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        where_sampling_flat = array_ops.reshape(where_sampling, [-1])
        where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
        sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
        inputs_not_sampling = array_ops.gather(
            base_next_inputs, where_not_sampling_flat)
        sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
Example 11
  def testNonSequenceNestedStructure(self):
    components = np.array([1, 2, 3], dtype=np.int64)

    dataset = dataset_ops.Dataset.from_tensors(components)
    self.assertEquals(dtypes.int64, dataset.output_types)
    self.assertEquals([3], dataset.output_shapes)

    dataset = dataset.filter(
        lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
    self.assertEquals(dtypes.int64, dataset.output_types)
    self.assertEquals([3], dataset.output_shapes)

    dataset = dataset.map(lambda x: array_ops.stack([x, x]))
    self.assertEquals(dtypes.int64, dataset.output_types)
    self.assertEquals([2, 3], dataset.output_shapes)

    dataset = dataset.flat_map(
        lambda x: dataset_ops.Dataset.from_tensor_slices(x))
    self.assertEquals(dtypes.int64, dataset.output_types)
    self.assertEquals([3], dataset.output_shapes)

    iterator = dataset.make_one_shot_iterator()
    get_next = iterator.get_next()
    self.assertEquals(dtypes.int64, get_next.dtype)
    self.assertEquals([3], get_next.shape)
Example 12
def is_strictly_increasing(x, name=None):
    """Returns `True` if `x` is strictly increasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.
  If `x` has less than two elements, it is trivially strictly increasing.

  See also:  `is_non_decreasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).
      Defaults to "is_strictly_increasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
    with ops.op_scope([x], name, "is_strictly_increasing"):
        diff = _get_diff_for_monotonic_comparison(x)
        # When len(x) = 1, diff = [], less = [], and reduce_all([]) = True.
        zero = ops.convert_to_tensor(0, dtype=diff.dtype)
        return math_ops.reduce_all(math_ops.less(zero, diff))
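The same check is exposed in current TensorFlow as tf.math.is_strictly_increasing, which flattens its input in row-major order before comparing adjacent elements. A short usage sketch:

import tensorflow as tf

print(tf.math.is_strictly_increasing(tf.constant([1, 2, 4])).numpy())  # True
print(tf.math.is_strictly_increasing(tf.constant([1, 2, 2])).numpy())  # False
print(tf.math.is_strictly_increasing(tf.constant([3.0])).numpy())      # True (trivially)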
Example 13
def _assert_batch_positive_definite(sigma_chol):
    """Add assertions checking that the sigmas are all Positive Definite.

  Given `sigma_chol == cholesky(sigma)`, it is sufficient to check that
  `all(diag(sigma_chol) > 0)`.  This is because to check that a matrix is PD,
  it is sufficient that its cholesky factorization is PD, and to check that a
  triangular matrix is PD, it is sufficient to check that its diagonal
  entries are positive.

  Args:
    sigma_chol: N-D.  The lower triangular cholesky decomposition of `sigma`.

  Returns:
    An assertion op to use with `control_dependencies`, verifying that
    `sigma_chol` is positive definite.
  """
    sigma_batch_diag = array_ops.batch_matrix_diag_part(sigma_chol)
    return logging_ops.Assert(
        math_ops.reduce_all(sigma_batch_diag > 0),
        [
            "sigma_chol is not positive definite.  batched diagonals: ",
            sigma_batch_diag,
            " shaped: ",
            array_ops.shape(sigma_batch_diag),
        ],
    )
Example 14
 def _assert_has_shape(x, shape):
     x_shape = array_ops.shape(x)
     packed_shape = array_ops.pack(shape)
     return logging_ops.Assert(
         math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
         ["Expected shape for Tensor %s is " % x.name, packed_shape, " but saw shape: ", x_shape],
     )
Example 15
 def testAxesType(self):
   for dtype in [dtypes.int64, dtypes.int32]:
     with self.test_session(use_gpu=True) as sess:
       v = math_ops.reduce_all([True, True],
                               constant_op.constant(0, dtype=dtype))
       tf_v = sess.run(v)
     self.assertAllEqual(tf_v, True)
Example 16
def random_crop(value, size, seed=None, name=None):
  """Randomly crops a tensor to a given size.

  Slices a shape `size` portion out of `value` at a uniformly chosen offset.
  Requires `value.shape >= size`.

  If a dimension should not be cropped, pass the full size of that dimension.
  For example, RGB images can be cropped with
  `size = [crop_height, crop_width, 3]`.

  Args:
    value: Input tensor to crop.
    size: 1-D tensor with size the rank of `value`.
    seed: Python integer. Used to create a random seed. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    name: A name for this operation (optional).

  Returns:
    A cropped tensor of the same rank as `value` and shape `size`.
  """
  # TODO(shlens): Implement edge case to guarantee output size dimensions.
  # If size > value.shape, zero pad the result so that it always has shape
  # exactly size.
  with ops.op_scope([value, size], name, "random_crop") as name:
    value = ops.convert_to_tensor(value, name="value")
    size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
    shape = array_ops.shape(value)
    check = logging_ops.Assert(math_ops.reduce_all(shape >= size),
                               ["Need value.shape >= size, got ", shape, size])
    shape = control_flow_ops.with_dependencies([check], shape)
    limit = shape - size + 1
    offset = random_uniform(array_ops.shape(shape), dtype=size.dtype,
                            maxval=size.dtype.max, seed=seed) % limit
    return array_ops.slice(value, offset, size, name=name)
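random_crop survives in the public API as tf.image.random_crop, with the same contract: size must have one entry per dimension of value, and the crop offset is drawn uniformly. Usage sketch:

import tensorflow as tf

image = tf.random.uniform([32, 32, 3])              # a small random RGB image
patch = tf.image.random_crop(image, size=[8, 8, 3], seed=42)
print(patch.shape)                                  # (8, 8, 3)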
Example 17
 def initialize(self, name=None):
   with ops.name_scope(name, "TrainingHelperInitialize"):
     finished = math_ops.equal(0, self._sequence_length)
     all_finished = math_ops.reduce_all(finished)
     next_inputs = control_flow_ops.cond(
         all_finished, lambda: self._zero_inputs,
         lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
     return (finished, next_inputs)
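This reduce_all(finished) gate recurs throughout the decoding helpers below: per-sequence finished flags are collapsed into a single scalar that decides, via cond, whether to feed placeholder inputs or real ones. A standalone sketch of the gate; shapes and values are made up:

import tensorflow as tf

finished = tf.constant([True, False, True])       # per-sequence finished flags
all_finished = tf.reduce_all(finished)            # False: one sequence still running
next_inputs = tf.cond(all_finished,
                      lambda: tf.zeros([3, 4]),   # placeholder inputs once done
                      lambda: tf.ones([3, 4]))    # "real" inputs otherwise
print(all_finished.numpy(), next_inputs[0, 0].numpy())  # False 1.0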
Example 18
  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledOutputTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))
      sample_ids = math_ops.cast(sample_ids, dtypes.bool)

      def maybe_sample():
        """Perform scheduled sampling."""

        def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
          """Concatenate outputs with auxiliary inputs, if they exist."""
          if self._auxiliary_input_tas is None:
            return outputs_

          next_time = time + 1
          auxiliary_inputs = nest.map_structure(
              lambda ta: ta.read(next_time), self._auxiliary_input_tas)
          if indices is not None:
            auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
          return nest.map_structure(
              lambda x, y: array_ops.concat((x, y), -1),
              outputs_, auxiliary_inputs)

        if self._next_inputs_fn is None:
          return array_ops.where(
              sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
              base_next_inputs)

        where_sampling = math_ops.cast(
            array_ops.where(sample_ids), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
        outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
        inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
                                                  where_not_sampling)
        sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
            self._next_inputs_fn(outputs_sampling), where_sampling)

        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
      next_inputs = control_flow_ops.cond(
          math_ops.logical_or(all_finished, no_samples),
          lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
Example 19
 def next_inputs(self, sample_ids, name=None):
   finished = math_ops.equal(sample_ids, self.config.eos_token)
   all_finished = math_ops.reduce_all(finished)
   next_inputs = control_flow_ops.cond(
       all_finished,
       # If we're finished, the next_inputs value doesn't matter
       lambda:  tf.nn.embedding_lookup(self.target_embedding, tf.tile([self.config.eos_token], [self.config.beam_width])),
       lambda: tf.nn.embedding_lookup(self.target_embedding, sample_ids))
   return all_finished, next_inputs
Example 20
  def _process_scale(self, scale, event_ndims):
    """Helper to __init__ which gets scale in batch-ready form.

    This function expands dimensions of `scale` according to the following
    table:
                     event_ndims
    scale.ndims   0            1
              0  [1]+S+[1,1]   "silent error"
              1  [ ]+S+[1,1]   "silent error"
              2  [ ]+S+[1,1]   [1]+S+[ ]
              3  [ ]+S+[1,1]   [ ]+S+[ ]
            ...  (same)        (same)

    The idea is that we want to convert `scale` into something which can always
    work for, say, the left-hand argument of `batch_matmul`.

    Args:
      scale: `Tensor`.
      event_ndims: `Tensor` (0D, `int32`).

    Returns:
      scale: `Tensor` with dims expanded according to [above] table.
      batch_ndims: `Tensor` (0D, `int32`).  The ndims of the `batch` portion.
    """
    ndims = array_ops.rank(scale)
    left = math_ops.select(
        math_ops.reduce_any([
            math_ops.reduce_all([
                math_ops.equal(ndims, 0),
                math_ops.equal(event_ndims, 0)
            ]),
            math_ops.reduce_all([
                math_ops.equal(ndims, 2),
                math_ops.equal(event_ndims, 1)
            ])]), 1, 0)
    right = math_ops.select(math_ops.equal(event_ndims, 0), 2, 0)
    pad = array_ops.concat(0, (
        array_ops.ones([left], dtype=dtypes.int32),
        array_ops.shape(scale),
        array_ops.ones([right], dtype=dtypes.int32)))
    scale = array_ops.reshape(scale, pad)
    batch_ndims = ndims - 2 + right
    return scale, batch_ndims
Example 21
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients. See base class @{tf.train.Optimizer}."""
    grads = [g for (g, _) in grads_and_vars]

    is_finite_grad = []
    for g in grads:
      is_finite_grad.append(math_ops.reduce_all(gen_math_ops.is_finite(g)))
    is_overall_finite = math_ops.reduce_all(is_finite_grad)

    # Only update gradients when all grads are finite.
    def true_apply_gradients_fn():
      return self._opt.apply_gradients(grads_and_vars, global_step, name)

    update_vars = control_flow_ops.cond(
        is_overall_finite, true_apply_gradients_fn, gen_control_flow_ops.no_op)
    # Potentially adjust gradient scale in case of finite gradients.
    return control_flow_ops.group(
        update_vars,
        self._loss_scale_manager.update_loss_scale(is_overall_finite))
Example 22
def _logical_and(*args):
  """Convenience function which attempts to statically `reduce_all`."""
  args_ = [_static_value(x) for x in args]
  if any(x is not None and not bool(x) for x in args_):
    return constant_op.constant(False)
  if all(x is not None and bool(x) for x in args_):
    return constant_op.constant(True)
  if len(args) == 2:
    return math_ops.logical_and(*args)
  return math_ops.reduce_all(args)
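The helper above folds the AND statically when every argument's truth value is known at graph-construction time, and only falls back to a reduce_all op otherwise. A sketch of the same idea using the public tf.get_static_value in place of the internal _static_value helper:

import tensorflow as tf

def logical_and(*args):
  vals = [tf.get_static_value(tf.convert_to_tensor(x)) for x in args]
  if any(v is not None and not bool(v) for v in vals):
    return tf.constant(False)
  if all(v is not None and bool(v) for v in vals):
    return tf.constant(True)
  return tf.reduce_all(args)

print(logical_and(True, True, tf.constant(True)))  # tf.Tensor(True, ...)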
Example 23
 def next_inputs(self, time, outputs, state, sample_ids, name=None):
   """next_inputs_fn for GreedyEmbeddingHelper."""
   del time, outputs  # unused by next_inputs_fn
   finished = math_ops.equal(sample_ids, self._end_token)
   all_finished = math_ops.reduce_all(finished)
   next_inputs = control_flow_ops.cond(
       all_finished,
       # If we're finished, the next_inputs value doesn't matter
       lambda: self._start_inputs,
       lambda: self._embedding_fn(sample_ids))
   return (finished, next_inputs, state)
Example 24
def _assert_close(x, y, data=None, summarize=None, name=None):
    if x.dtype.is_integer:
        return check_ops.assert_equal(x, y, data=data, summarize=summarize, name=name)

    with ops.op_scope([x, y, data], name, "assert_close"):
        x = ops.convert_to_tensor(x, name="x")
        y = ops.convert_to_tensor(y, name="y")
        tol = np.finfo(x.dtype.as_numpy_dtype).resolution
        if data is None:
            data = ["Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ", y.name, y]
        condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x - y), tol))
        return logging_ops.Assert(condition, data, summarize=summarize)
Example 25
 def max_reduce_fn(state, value):
   """Computes the maximum shape to pad to."""
   condition = math_ops.reduce_all(
       math_ops.logical_or(
           math_ops.less_equal(value.dense_shape, padded_shape),
           math_ops.equal(padded_shape, -1)))
   assert_op = control_flow_ops.Assert(condition, [
       "Actual shape greater than padded shape: ", value.dense_shape,
       padded_shape
   ])
   with ops.control_dependencies([assert_op]):
     return math_ops.maximum(state, value.dense_shape)
Example 26
  def _assert_non_singular(self):
    if self.dtype.is_complex:
      should_be_nonzero = math_ops.complex_abs(self._diag)
    else:
      should_be_nonzero = self._diag

    nonzero_diag = math_ops.reduce_all(
        math_ops.logical_not(math_ops.equal(should_be_nonzero, 0)))

    return control_flow_ops.Assert(
        nonzero_diag,
        data=["Singular operator: diag contained zero values.", self._diag])
Example 27
    def create_axis_ops(sp_input, num_items, update_fn, axis_name):
      """Creates book-keeping and training ops for a given axis.

      Args:
        sp_input: A SparseTensor corresponding to the row or column batch.
        num_items: An integer, the total number of items of this axis.
        update_fn: A function that takes one argument (`sp_input`), and that
        returns a tuple of
          * new_factors: A float Tensor of the factor values after update.
          * update_op: a TensorFlow op which updates the factors.
          * loss: A float Tensor, the unregularized loss.
          * reg_loss: A float Tensor, the regularization loss.
          * sum_weights: A float Tensor, the sum of factor weights.
        axis_name: A string that specifies the name of the axis.

      Returns:
        A tuple consisting of:
          * reset_processed_items_op: A TensorFlow op, to be run before the
            beginning of any sweep. It marks all items as not-processed.
          * axis_train_op: A Tensorflow op, to be run during this axis' sweeps.
      """
      processed_items_init = array_ops.fill(dims=[num_items], value=False)
      with ops.colocate_with(processed_items_init):
        processed_items = variable_scope.variable(
            processed_items_init,
            collections=[ops.GraphKeys.GLOBAL_VARIABLES],
            trainable=False,
            name="processed_" + axis_name)
      reset_processed_items_op = state_ops.assign(
          processed_items, processed_items_init,
          name="reset_processed_" + axis_name)
      _, update_op, loss, reg, sum_weights = update_fn(sp_input)
      input_indices = sp_input.indices[:, 0]
      with ops.control_dependencies([
          update_op,
          state_ops.assign(loss_var, loss + reg),
          state_ops.assign(rwse_var, math_ops.sqrt(loss / sum_weights))]):
        with ops.colocate_with(processed_items):
          update_processed_items = state_ops.scatter_update(
              processed_items,
              input_indices,
              array_ops.ones_like(input_indices, dtype=dtypes.bool),
              name="update_processed_{}_indices".format(axis_name))
        with ops.control_dependencies([update_processed_items]):
          is_sweep_done = math_ops.reduce_all(processed_items)
          axis_train_op = control_flow_ops.group(
              global_step_incr_op,
              state_ops.assign(is_sweep_done_var, is_sweep_done),
              state_ops.assign_add(
                  completed_sweeps_var,
                  math_ops.cast(is_sweep_done, dtypes.int32)),
              name="{}_sweep_train_op".format(axis_name))
      return reset_processed_items_op, axis_train_op
Example 28
def _assert_sparse_indices_are_ragged_right(indices):
  """Checks that the given SparseTensor.indices tensor is ragged-right.

  Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right
  because the entry `[3, 1]` skips a cell.

  Args:
    indices: The SparseTensor indices to check.

  Returns:
    A list of control dependency op tensors.
  """
  index_prefix = indices[:, :-1]
  index_suffix = indices[:, -1]

  # Check whether each index is starting a new row in the innermost dimension
  # (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
  # (Note: this skips the first index; we will check that separately below.)
  index_prefix_changed = math_ops.reduce_any(
      math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)

  # Check two cases:
  #   * For indices that start a new row: index_suffix[i] must be zero.
  #   * For indices that continue a row: index_suffix[i] must be equal to
  #     index_suffix[i-1]+1.
  index_ok = array_ops.where(
      index_prefix_changed, math_ops.equal(index_suffix[1:], 0),
      math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))

  # Also check that the very first index didn't skip any cells.  The first
  # index starts a new row (by definition), so its suffix should be zero.
  sparse_indices_are_ragged_right = math_ops.logical_and(
      math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
      math_ops.reduce_all(index_ok))

  message = [
      'SparseTensor is not right-ragged',
      'SparseTensor.indices =', indices
  ]
  return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]
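It helps to trace this check on the docstring's own example: indices = [[0, 0], [0, 1], [2, 0], [3, 1]] is not ragged-right because [3, 1] starts a new row but skips column 0. A minimal eager-mode sketch of the same computation:

import tensorflow as tf

indices = tf.constant([[0, 0], [0, 1], [2, 0], [3, 1]], dtype=tf.int64)
prefix, suffix = indices[:, :-1], indices[:, -1]
prefix_changed = tf.reduce_any(tf.not_equal(prefix[1:], prefix[:-1]), axis=1)
index_ok = tf.where(prefix_changed,
                    tf.equal(suffix[1:], 0),
                    tf.equal(suffix[1:], suffix[:-1] + 1))
ragged_right = tf.logical_and(tf.reduce_all(tf.equal(suffix[:1], 0)),
                              tf.reduce_all(index_ok))
print(ragged_right.numpy())  # False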
Example 29
def _check_weights_match_logits_and_reshape(weights, logits):
  """Checks that weights shape matches logits and reshapes if needed.

  Consider logits of shape [D0, D1, ... DN, logits_dimension]. Weights shape
  can be either:
  * [D0, D1, ... DN, logits_dimension]
  * [D0, D1, ... DN]: In this case, weights is reshaped into
    [D0, D1, ... DN, 1] to work with weight broadcasting rules.

  Args:
    weights: weights Tensor.
    logits: logits Tensor.
  Returns:
    Validated and reshaped weights Tensor.
  """
  err_msg = (
      'weights shape must be [D0, D1, ... DN], [D0, D1, ... DN, 1] or '
      '[D0, D1, ... DN, logits_dimension]')
  with ops.name_scope(None, 'weights', (weights, logits)) as scope:
    weights_shape = array_ops.shape(weights, name='weights_shape')
    logits_shape = array_ops.shape(logits, name='logits_shape')
    if (weights.shape.ndims is not None and logits.shape.ndims is not None and
        weights.shape.ndims == logits.shape.ndims - 1):
      assert_dimension = check_ops.assert_equal(
          logits_shape[:-1], weights_shape, message=err_msg,
          data=['logits_shape: ', logits_shape,
                'weights_shape: ', weights_shape])
      with ops.control_dependencies([assert_dimension]):
        return array_ops.expand_dims(weights, -1, name=scope)
    supported_weights_shape = array_ops.concat([logits_shape[:-1], [1]], axis=0)
    condition = math_ops.reduce_any(
        [math_ops.reduce_all(math_ops.equal(logits_shape, weights_shape)),
         math_ops.reduce_all(math_ops.equal(
             supported_weights_shape, weights_shape))])
    assert_dimension = control_flow_ops.Assert(
        condition=condition,
        data=[err_msg, 'logits_shape: ', logits_shape,
              'weights_shape: ', weights_shape])
    with ops.control_dependencies([assert_dimension]):
      return array_ops.identity(weights, name=scope)
Example 30
  def step(self, time, inputs, state, name=None):
    """Perform a decoding step.

    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
      name: Name scope for any created operations.

    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """
    batch_size = self._batch_size
    beam_width = self._beam_width
    end_token = self._end_token
    length_penalty_weight = self._length_penalty_weight

    with ops.name_scope(name, "BeamSearchDecoderStep", (time, inputs, state)):
      cell_state = state.cell_state
      inputs = nest.map_structure(self._merge_batch_beams, inputs)
      cell_state = nest.map_structure(self._maybe_merge_batch_beams, cell_state)
      try:
        cell_outputs, next_cell_state = self._cell(
            inputs, cell_state, tiling_factor=beam_width)
      except TypeError as e:
        if "unexpected keyword argument 'tiling_factor'" in str(e):
          cell_outputs, next_cell_state = self._cell(inputs, cell_state)
        else:
          raise

      cell_outputs = nest.map_structure(self._split_batch_beams, cell_outputs)
      next_cell_state = nest.map_structure(self._maybe_split_batch_beams,
                                           next_cell_state)

      if self._output_layer is not None:
        cell_outputs = self._output_layer(cell_outputs)

      beam_search_output, beam_search_state = _beam_search_step(
          time=time,
          logits=cell_outputs,
          beam_state=state,
          batch_size=batch_size,
          beam_width=beam_width,
          end_token=end_token,
          length_penalty_weight=length_penalty_weight)
      finished = beam_search_state.finished
      sample_ids = beam_search_output.predicted_ids
      next_inputs = control_flow_ops.cond(
          math_ops.reduce_all(finished), lambda: self._start_inputs,
          lambda: self._embedding_fn(sample_ids))

    return (beam_search_output, beam_search_state, next_inputs, finished)
Example 31
def is_non_decreasing(x, name=None):
  """Returns `True` if `x` is non-decreasing.

  Elements of `x` are compared in row-major order.  The tensor `[x[0],...]`
  is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.
  If `x` has less than two elements, it is trivially non-decreasing.

  See also:  `is_strictly_increasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).  Defaults to "is_non_decreasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_non_decreasing', [x]):
    diff = _get_diff_for_monotonic_comparison(x)
    # When len(x) = 1, diff = [], less_equal = [], and reduce_all([]) = True.
    zero = ops.convert_to_tensor(0, dtype=diff.dtype)
    return math_ops.reduce_all(math_ops.less_equal(zero, diff))
Example 32
 def masked_add_n(inputs):
     masks = array_ops.stack([x.mask for x in inputs])
     return MaskedTensor(math_ops.add_n([x.values for x in inputs]),
                         math_ops.reduce_all(masks, axis=0))
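The mask combination here is simply an element-wise AND across all inputs, hence reduce_all over the stacked masks along axis 0. A small sketch on concrete masks (MaskedTensor itself is assumed to be defined elsewhere in that codebase):

import tensorflow as tf

masks = tf.stack([tf.constant([True, True, False]),
                  tf.constant([True, False, True])])
print(tf.reduce_all(masks, axis=0).numpy())  # [ True False False]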
Example 33
def assert_equal(x, y, data=None, summarize=None, message=None, name=None):
    """Assert the condition `x == y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] == y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x:  Numeric `Tensor`.
    y:  Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).  Defaults to "assert_equal".

  Returns:
    Op that raises `InvalidArgumentError` if `x == y` is False.
    @compatibility{eager} returns None

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x == y` is False. The check can be performed immediately during eager
      execution or if `x` and `y` are statically known.
  """
    message = message or ''
    with ops.name_scope(name, 'assert_equal', [x, y, data]):
        x = ops.convert_to_tensor(x, name='x')
        y = ops.convert_to_tensor(y, name='y')

        if context.executing_eagerly():
            eq = math_ops.equal(x, y)
            condition = math_ops.reduce_all(eq)
            if not condition:
                # Prepare a message with first elements of x and y.
                summary_msg = ''
                # Default to printing 3 elements like control_flow_ops.Assert (used
                # by graph mode) does.
                summarize = 3 if summarize is None else summarize
                if summarize:
                    # reshape((-1,)) is the fastest way to get a flat array view.
                    x_np = x.numpy().reshape((-1, ))
                    y_np = y.numpy().reshape((-1, ))
                    x_sum = min(x_np.size, summarize)
                    y_sum = min(y_np.size, summarize)
                    summary_msg = ('First %d elements of x:\n%s\n'
                                   'First %d elements of y:\n%s\n' %
                                   (x_sum, x_np[:x_sum], y_sum, y_np[:y_sum]))

                index_and_values_str = ''
                if x.shape == y.shape and x.shape.as_list():
                    # If the shapes of x and y are the same (and not scalars),
                    # Get the values that actually differed and their indices.
                    # If shapes are different this information is more confusing
                    # than useful.
                    mask = math_ops.logical_not(eq)
                    indices = array_ops.where(mask)
                    indices_np = indices.numpy()
                    x_vals = array_ops.boolean_mask(x, mask)
                    y_vals = array_ops.boolean_mask(y, mask)
                    summarize = min(summarize, indices_np.shape[0])
                    index_and_values_str = (
                        'Indices of first %s different values:\n%s\n'
                        'Corresponding x values:\n%s\n'
                        'Corresponding y values:\n%s\n' %
                        (summarize, indices_np[:summarize],
                         x_vals.numpy().reshape(
                             (-1, ))[:summarize], y_vals.numpy().reshape(
                                 (-1, ))[:summarize]))

                raise errors.InvalidArgumentError(
                    node_def=None,
                    op=None,
                    message=(
                        '%s\nCondition x == y did not hold.\n%s%s' %
                        (message or '', index_and_values_str, summary_msg)))
            return

        if data is None:
            data = [
                message, 'Condition x == y did not hold element-wise:',
                'x (%s) = ' % x.name, x,
                'y (%s) = ' % y.name, y
            ]
        condition = math_ops.reduce_all(math_ops.equal(x, y))
        x_static = tensor_util.constant_value(x)
        y_static = tensor_util.constant_value(y)
        if x_static is not None and y_static is not None:
            condition_static = (x_static == y_static).all()
            _assert_static(condition_static, data)
        return control_flow_ops.Assert(condition, data, summarize=summarize)
Example 34
 def condition(unused_time, unused_outputs_ta, unused_state,
               unused_inputs, finished, unused_sequence_lengths):
     return math_ops.logical_not(math_ops.reduce_all(finished))
Example 35
def _sampled_scattered_embedding_lookup(
    params, values, dimension=None, sampled_candidates=None, hash_key=None,
    name=None):
  """Looks up embeddings using parameter hashing for each value in `values`.

  This method looks up selected embedding dimensions if `sampled_candidates` is
  given, otherwise looks up all dimensions.

  The i-th embedding component of a value v in `values` is found by retrieving
  the weight whose index is a fingerprint of the pair (v,i).
  The concept is explored as "feature hashing" for model compression in this
  paper: http://arxiv.org/pdf/1504.04788.pdf

  Feature hashing has the pleasant effect of allowing us to compute an embedding
  without needing a pre-determined vocabulary, relieving some amount of process
  complexity. It also allows for us to maintain embeddings for possibly
  trillions of features with a fixed amount of memory.

  Note that this is superior to out-of-vocabulary shared "hash buckets" in that
  the embedding is extremely likely to be unique for each token as opposed to
  being shared across probably-colliding tokens. The price is that we must
  compute a hash once for each scalar in the token's embedding as opposed to
  once per token.

  If `params` is a list, it represents a partition of the embedding parameters.
  Each tensor in the list should have the same length, except for the first ones
  which may have an additional element. For instance 10 parameters can be
  partitioned in 4 tensors with length `[3, 3, 2, 2]`.

  Args:
    params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
      Each tensor must be of rank 1 with fully-defined shape.
    values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
    dimension: Embedding dimension. The user must specify either `dimension` or
      `sampled_candidates`.
    sampled_candidates: An optional `Tensor` of slice indices to keep along the
      final dimension with shape `[d0, ..., dn, N]`. If given, `dimension` is
      ignored. If `None`, looks up all candidates.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).
    name: An optional name for this op.

  Returns:
    A `Tensor` with shape `[d0, ..., dn, dimension]`.
    If `sampled_candidates` is given, the output shape is `[d0, ..., dn, N]`

  Raises:
    ValueError: if dimension is not positive or the partition size is invalid.
  """
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)
  if not isinstance(params, list):
    params = [params]

  with ops.name_scope(name, "scattered_embedding_lookup",
                      params + [dimension, values]):
    # Flatten the values
    values_shape = array_ops.shape(values)
    values = array_ops.reshape(values, [-1, 1])

    if sampled_candidates is None:
      if dimension is None:
        raise ValueError(
            "You must specify either dimension or sampled_candidates.")
      if dimension <= 0:
        raise ValueError("Dimension must be >0. Given is %d" % dimension)
      sampled_candidates = array_ops.tile(array_ops.expand_dims(
          math_ops.range(0, dimension), 0), array_ops.shape(values))
    else:
      dimension = array_ops.shape(sampled_candidates)[
          math_ops.subtract(array_ops.rank(sampled_candidates), 1)]
      sampled_candidates_shape = array_ops.shape(sampled_candidates)
      dimension_tensor = array_ops.reshape(dimension, shape=[1,])
      expected_shape = array_ops.concat([values_shape, dimension_tensor], 0)
      with ops.control_dependencies([control_flow_ops.Assert(
          math_ops.reduce_all(math_ops.equal(sampled_candidates_shape,
                                             expected_shape)),
          ["The shape of sampled_candidates: ", sampled_candidates_shape,
           " does not match the shape of values: ", values_shape])]):
        # Flatten sampled_candidates, same way as values are flattened.
        sampled_candidates = array_ops.reshape(sampled_candidates,
                                               [-1, dimension])

    num_partitions = len(params)
    partition_sizes = []
    for p in range(num_partitions):
      shape = params[p].get_shape()
      shape.assert_has_rank(1)
      shape.assert_is_fully_defined()
      partition_sizes.append(shape[0].value)
    num_params = sum(partition_sizes)  # Total number of parameters.

    # Assert the size of each partition.
    for p in range(num_partitions):
      expected_size = (num_params - p - 1) // num_partitions + 1
      if partition_sizes[p] != expected_size:
        raise ValueError("Tensor %d in params has size %d, expected %d." %
                         (p, partition_sizes[p], expected_size))

    # With two values v1 and v2 and 3 dimensions, we will cross
    # [[0, 1, 2], [0, 1, 2]] with [[v1], [v2]].
    tensors_to_cross = [sampled_candidates, values]
    ids = sparse_feature_cross_op.sparse_feature_cross(
        tensors_to_cross, hashed_output=True, num_buckets=num_params,
        hash_key=hash_key)
    ids = sparse_ops.sparse_tensor_to_dense(ids)

    # No need to validate the indices since we have checked the params
    # dimensions and we know the largest id.
    result = embedding_ops.embedding_lookup(
        params, ids, partition_strategy="div")

    return array_ops.reshape(result,
                             array_ops.concat([values_shape, [dimension]], 0))
Example 36
    def step(self, time, inputs, state, mec_attr, name=None):
        """Perform a decoding step.

    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
      name: Name scope for any created operations.

    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """

        batch_size = self._batch_size
        beam_width = self._beam_width
        end_token = self._end_token
        users = self._users
        batch = self._batch
        beam = self._beam
        mec_attr = mec_attr
        user_attr = self._user_attr
        num_attr = self._num_attr
        embed_vocab = self._vocab_size
        vocab = embed_vocab - 2
        #batch = self._batch
        length_penalty_weight = self._length_penalty_weight

        with ops.name_scope(name, "BeamSearchDecoderStep",
                            (time, inputs, state)):
            cell_state = state.cell_state
            inputs = nest.map_structure(
                lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]),
                inputs)
            cell_state = nest.map_structure(self._maybe_merge_batch_beams,
                                            cell_state, self._cell.state_size)
            cell_outputs, next_cell_state = self._cell(inputs, cell_state)
            cell_outputs = nest.map_structure(
                lambda out: self._split_batch_beams(out, out.shape[1:]),
                cell_outputs)
            next_cell_state = nest.map_structure(self._maybe_split_batch_beams,
                                                 next_cell_state,
                                                 self._cell.state_size)

            if self._output_layer is not None:
                cell_outputs = self._output_layer(cell_outputs)
            '''temp = tensorflow.print(cell_outputs, output_stream=sys.stdout)
      y = tensorflow.constant(0,shape=[batch,1],dtype=tensorflow.int32)
      tile_block = tensorflow.tile(tensorflow.expand_dims(block,1),[1,beam,1,1])
      x = tensorflow.one_hot(y,users+1) #
      x = tensorflow.expand_dims(x,-1)
      
      blocked_val = tensorflow.reduce_sum((x*tile_block),axis=2)
      cell_outputs=tensorflow.multiply(blocked_val,cell_outputs)'''
            z = tf.zeros([batch, vocab], dtype=tf.float32)
            ideal = tf.ones([batch, vocab], dtype=tf.float32)
            multiply = tf.constant([batch], dtype=tf.int32)  #2 is batch size
            index = tf.zeros([batch, 1], dtype=tf.int32)
            t = tf.cast(tf.one_hot(time, users), dtype=tf.int32)
            t = tf.tile(t, multiply)
            t = tf.reshape(t, shape=[batch, users, 1])
            ids = tf.zeros(shape=[batch, 1], dtype=tf.int32)
            current_user_attr = tf.zeros(shape=[batch, num_attr],
                                         dtype=tf.int32)
            temp = tf.constant(0, dtype=tf.int32)

            def body(ideal, z, cell_outputs, ids, current_user_attr, temp):
                temp = temp + 1
                i, index = tf.nn.top_k(cell_outputs, k=1)
                index = tf.reshape(index, [batch, 1])
                ids = index + 2
                ids = tf.cast(tf.one_hot(ids, embed_vocab), dtype=tf.int32)
                ids = tf.reshape(ids, shape=[batch, embed_vocab, 1])
                current_user_attr = tf.reduce_sum(t * user_attr, axis=1)
                current_base_attr = tf.reduce_sum(ids * mec_attr, axis=1)
                diff_bool = tf.math.greater_equal(current_base_attr,
                                                  current_user_attr)
                diff_bool = tf.reduce_all(diff_bool, axis=1)
                diff_int = tf.expand_dims(tf.cast(diff_bool, tf.int32), axis=1)
                ii, _ = tf.meshgrid(tf.range(batch),
                                    tf.range(1),
                                    indexing='ij')
                ind = tf.stack([ii, index], axis=-1)
                ind = tf.reshape(ind, [batch, 2])
                x = tf.sparse_to_dense(
                    sparse_indices=ind,
                    output_shape=[batch, vocab],
                    default_value=1,
                    sparse_values=0,
                )
                z = tf.cast(tf.bitwise.bitwise_or(x, diff_int),
                            dtype=tf.float32)
                z = tf.where(
                    tf.equal(z, 0),
                    tf.constant(-20, shape=[batch, vocab], dtype=tf.float32),
                    z)
                z = tf.expand_dims(z, axis=1)
                #z = tf.Print(z,[z[1]],"z")
                cell_outputs = tf.multiply(cell_outputs, z)

                #logits = tf.Print(logits,[logits[0]],"logits")
                return (ideal, z, cell_outputs, ids, current_user_attr, temp)

            def condition(ideal, z, cell_outputs, ids, current_user_attr,
                          temp):
                #return tf.less(temp,tf.constant(1,dtype=tf.int32))
                return tf.logical_not(tf.reduce_all(tf.equal(ideal, z)))

            ideal, z, cell_outputs, ids, current_user_attr, temp = tf.while_loop(
                condition,
                body,
                loop_vars=[
                    ideal, z, cell_outputs, ids, current_user_attr, temp
                ],
                shape_invariants=[
                    ideal.get_shape(),
                    tf.TensorShape(None),
                    cell_outputs.get_shape(),
                    tf.TensorShape(None),
                    tf.TensorShape(None),
                    temp.get_shape()
                ])
            y = tf.multiply(ids, tf.expand_dims(current_user_attr, axis=1))
            mec_attr = tf.subtract(mec_attr, y)
            end = tf.constant(users, dtype=tf.int32)

            def end_prediction(cell_outputs):
                ids = tf.cast(tf.one_hot(0, vocab), dtype=tf.float32)
                ids = tf.multiply(ids, 10)
                cell_outputs = tf.multiply(cell_outputs, ids)
                return cell_outputs

            def pass_val(cell_outputs):
                return cell_outputs

            cell_outputs = tf.cond(tf.equal(time, end),
                                   lambda: end_prediction(cell_outputs),
                                   lambda: pass_val(cell_outputs))

            #self._mec_attr = mec_attr
            beam_search_output, beam_search_state, word_indices = _beam_search_step(
                time=time,
                logits=cell_outputs,
                next_cell_state=next_cell_state,
                beam_state=state,
                batch_size=batch_size,
                beam_width=beam_width,
                end_token=end_token,
                length_penalty_weight=length_penalty_weight,
            )

            finished = beam_search_state.finished
            sample_ids = beam_search_output.predicted_ids
            next_inputs = control_flow_ops.cond(
                math_ops.reduce_all(finished), lambda: self._start_inputs,
                lambda: self._embedding_fn(sample_ids, time + 1))

        #z = tf.Print(z,[z],"zlllll",summarize=80)
        #mec_attr = tf.Print(mec_attr,[mec_attr],"mec_attr",summarize=100)
        #cell_outputs = tf.Print(cell_outputs,[cell_outputs],"cell output",summarize=80)
        z = tf.reshape(z, [batch, 1, vocab])
        mec_attr = tf.reshape(mec_attr, [batch, embed_vocab, num_attr])
        #self._imp = tf.concat([self._imp,z],axis=1)
        return (beam_search_output, beam_search_state, next_inputs, finished,
                z, mec_attr, cell_outputs)
Example 37
 def _tf_reduce_all(self, x, reduction_axes, keep_dims):
     return math_ops.reduce_all(x, reduction_axes, keep_dims)
Example 38
 def f(x):
   return control_flow_ops.cond(
       math_ops.reduce_all(x > 1), lambda: 1. / x, lambda: x)
Example 39
    def broadcast_dimension(self, axis, lengths):
        """Returns a shape that is broadcast-compatible with self & lengths.

    * If dimension[axis] is uniform and lengths is a scalar, then check
      that either lengths==1 or axis==1 or lengths==axis, and tile
      dimension[axis] with tf.where(lengths==axis, 1, axis) repeats.

    * If dimension[axis] is uniform and lengths is a vector, then check
      that dimension[axis]==1, and raggedly tile dimension[axis] with
      lengths repeats.  (we can skip tiling if we statically know that
      slice_lengths == 1??)

    * If dimension[axis] is ragged and lengths is a scalar, then check
      that lengths==1.

    * If dimension[axis] is ragged and lengths is a vector, then check
      that self.dimension_size(axis) == lengths.

    Args:
      axis: `int`.  The dimension to broadcast.
      lengths: 0-D or 1-D integer `Tensor`.

    Returns:
      A `RaggedTensorDynamicShape`.
    """
        lengths = ragged_util.convert_to_int_tensor(lengths,
                                                    name='lengths',
                                                    dtype=self.dim_size_dtype)
        # Check whether lengths is a scalar (for uniform dimensions) or
        # vector (for ragged dimensions).
        if lengths.shape.ndims is None:
            raise ValueError('lengths must have a known rank.')
        elif lengths.shape.ndims > 1:
            raise ValueError('lengths must be a scalar or vector')
        else:
            lengths_is_scalar = (lengths.shape.ndims == 0)

        # Verify that the shapes are compatible.
        if self.is_ragged(axis):
            if lengths_is_scalar:
                condition = math_ops.equal(lengths, 1)
            else:
                condition = math_ops.reduce_all(
                    math_ops.equal(lengths, self.dimension_size(axis)))
        else:
            axis_dim_size = self.dimension_size(axis)
            if lengths_is_scalar:
                condition = (math_ops.equal(lengths, 1)
                             | math_ops.equal(axis_dim_size, 1)
                             | math_ops.equal(axis_dim_size, lengths))
            else:
                condition = math_ops.equal(axis_dim_size, 1)
        broadcast_err = [
            'Unable to broadcast: dimension size mismatch in dimension', axis,
            'lengths=', lengths, 'dim_size=',
            self.dimension_size(axis)
        ]
        broadcast_check = control_flow_ops.Assert(condition,
                                                  data=broadcast_err,
                                                  summarize=10)

        with ops.control_dependencies([broadcast_check]):
            # Partitioned dimensions:
            if axis < self.num_partitioned_dimensions:
                if self.is_ragged(axis):
                    # Use an identity op to make sure the check actually gets run.
                    return RaggedTensorDynamicShape(
                        self._partitioned_dim_sizes,
                        array_ops.identity(self.inner_dim_sizes))
                else:
                    return self._broadcast_uniform_partitioned_dimension(
                        axis, lengths)

            # Inner dimensions:
            else:
                if lengths_is_scalar:
                    return self._broadcast_inner_dimension_to_uniform(
                        axis, lengths)
                else:
                    if axis == 0:
                        raise ValueError(
                            'Unable to broadcast: '
                            'outermost dimension must be uniform.')
                    return self._broadcast_inner_dimension_to_ragged(
                        axis, lengths)
Example 40
    def step(self, time, inputs, state, name=None):
        """Perform a decoding step.

    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
      name: Name scope for any created operations.

    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """
        batch_size = self._batch_size
        beam_width = self._beam_width
        end_token = self._end_token
        length_penalty_weight = self._length_penalty_weight
        coverage_penalty_weight = self._coverage_penalty_weight
        constrained_matrix = self._constrained_matrix
        if constrained_matrix is not None:
            constrained_matrix = tf.expand_dims(constrained_matrix, 1)
            constrained_matrix = tf.tile(constrained_matrix,
                                         [1, beam_width, 1])

        with ops.name_scope(name, "ConstrainedBeamSearchDecoderStep",
                            (time, inputs, state)):
            cell_state = state.cell_state
            inputs = nest.map_structure(
                lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]),
                inputs)
            cell_state = nest.map_structure(self._maybe_merge_batch_beams,
                                            cell_state, self._cell.state_size)
            cell_outputs, next_cell_state = self._cell(inputs, cell_state)
            cell_outputs = nest.map_structure(
                lambda out: self._split_batch_beams(out, out.shape[1:]),
                cell_outputs)
            next_cell_state = nest.map_structure(self._maybe_split_batch_beams,
                                                 next_cell_state,
                                                 self._cell.state_size)

            if self._output_layer is not None:
                cell_outputs = self._output_layer(cell_outputs)

            beam_search_output, beam_search_state = _beam_search_step(
                time=time,
                logits=cell_outputs,
                next_cell_state=next_cell_state,
                beam_state=state,
                batch_size=batch_size,
                beam_width=beam_width,
                end_token=end_token,
                length_penalty_weight=length_penalty_weight,
                coverage_penalty_weight=coverage_penalty_weight,
                constrained_matrix=constrained_matrix)

            finished = beam_search_state.finished
            sample_ids = beam_search_output.predicted_ids
            next_inputs = control_flow_ops.cond(
                math_ops.reduce_all(finished), lambda: self._start_inputs,
                lambda: self._embedding_fn(sample_ids))

        return (beam_search_output, beam_search_state, next_inputs, finished)
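A hedged sketch of the constraint replication above: a per-example mask of shape `[batch, vocab]` is expanded and tiled so every beam shares its example's constraints (`constraint_mask` is an illustrative name):

```python
import tensorflow.compat.v1 as tf

def tile_constraints_over_beams(constraint_mask, beam_width):
    """[batch, vocab] -> [batch, beam_width, vocab]."""
    expanded = tf.expand_dims(constraint_mask, 1)   # [batch, 1, vocab]
    return tf.tile(expanded, [1, beam_width, 1])    # [batch, beam_width, vocab]
```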
Esempio n. 41
0
 def f(x, d):
   if math_ops.reduce_all(
       math_ops.greater(x, random_ops.random_normal([10, 10]))):
     return array_ops.reshape(x * 2, constant_op.constant([100]))
   else:
     return array_ops.reshape(x * 3, d)
Esempio n. 42
0
def _is_all_finite(grads):
    """Returns a scalar boolean tensor indicating if all gradients are finite."""
    is_finite_per_grad = [
        math_ops.reduce_all(math_ops.is_finite(g)) for g in grads
    ]
    return math_ops.reduce_all(is_finite_per_grad)
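One plausible use of this check (a sketch, not taken from the snippet's codebase) is to skip a parameter update whenever any gradient is NaN or Inf, as dynamic loss scaling does; `apply_fn` and `skip_fn` stand in for the real update ops:

```python
import tensorflow.compat.v1 as tf

def apply_if_finite(grads, apply_fn, skip_fn):
    """Runs apply_fn only when every gradient is finite, else skip_fn."""
    all_finite = tf.reduce_all([tf.reduce_all(tf.is_finite(g)) for g in grads])
    return tf.cond(all_finite, apply_fn, skip_fn)
```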
Esempio n. 43
0
  def _create_switch_ops(self,
                         processed_row_indices,
                         processed_col_indices,
                         train_op):
    """Creates ops to update is_row_sweep_var and to increment global_step.

    Creates two boolean tensors processed_rows and processed_cols, which keep
    track of which rows/cols have been processed during the current sweep.
    Returns ops that should be run after each row / col update.
      - When is_row_sweep_var is True, it sets
        processed_rows[processed_row_indices] to True.
      - When is_row_sweep_var is False, it sets
        processed_cols[processed_col_indices] to True.
    When all rows or all cols have been processed, negates is_row_sweep_var and
    resets processed_rows and processed_cols to False.
    All of the ops created by this function have control_dependencies on
    train_op.

    Args:
      processed_row_indices: A Tensor. The indices of the input rows that are
        processed during the current sweep.
      processed_col_indices: A Tensor. The indices of the input columns that
        are processed during the current sweep.
      train_op: An op. All the ops created by this function have
        control_dependencies on train_op.
    Returns:
      A list consisting of:
        is_sweep_done: A Boolean tensor, determines whether the sweep is done,
          i.e. all rows (during a row sweep) or all columns (during a column
          sweep) have been processed.
        switch_ops: An op that updates is_row_sweep_var when is_sweep_done is
          True. Has control_dependencies on train_op.
        global_step_incr_op: An op that increments the global_step counter. Has
          control_dependencies on switch_ops.
    """
    processed_rows_init = array_ops.fill(dims=[self._num_rows], value=False)
    with ops.colocate_with(processed_rows_init):
      processed_rows = variables.Variable(
          processed_rows_init,
          collections=[ops.GraphKeys.GLOBAL_VARIABLES],
          trainable=False,
          name="sweep_hook_processed_rows")
    processed_cols_init = array_ops.fill(dims=[self._num_cols], value=False)
    with ops.colocate_with(processed_cols_init):
      processed_cols = variables.Variable(
          processed_cols_init,
          collections=[ops.GraphKeys.GLOBAL_VARIABLES],
          trainable=False,
          name="sweep_hook_processed_cols")
    # After running the train_op, update processed_rows or processed_cols
    # tensors, depending on whether we are currently doing a row or a col sweep
    with ops.control_dependencies([train_op]):
      def get_row_update_op():
        with ops.colocate_with(processed_rows):
          return state_ops.scatter_update(
              processed_rows, processed_row_indices,
              array_ops.ones_like(processed_row_indices, dtype=dtypes.bool))

      def get_col_update_op():
        with ops.colocate_with(processed_cols):
          return state_ops.scatter_update(
              processed_cols, processed_col_indices,
              array_ops.ones_like(processed_col_indices, dtype=dtypes.bool))

      update_processed_op = control_flow_ops.cond(
          self._is_row_sweep_var, get_row_update_op, get_col_update_op)

      # After update_processed_op, check whether we have completed a sweep.
      # If this is the case, flip the is_row_sweep_var and reset processed_rows
      # and processed_cols tensors.
      with ops.control_dependencies([update_processed_op]):
        def get_switch_op():
          return state_ops.assign(
              self._is_row_sweep_var,
              gen_math_ops.logical_not(self._is_row_sweep_var)).op

        def get_reset_op():
          return control_flow_ops.group(
              state_ops.assign(processed_rows, processed_rows_init).op,
              state_ops.assign(processed_cols, processed_cols_init).op)

        is_sweep_done = control_flow_ops.cond(
            self._is_row_sweep_var,
            lambda: math_ops.reduce_all(processed_rows),
            lambda: math_ops.reduce_all(processed_cols),
            name="sweep_hook_is_sweep_done")
        switch_op = control_flow_ops.cond(
            is_sweep_done, get_switch_op, control_flow_ops.no_op,
            name="sweep_hook_switch_op")
        reset_op = control_flow_ops.cond(
            is_sweep_done, get_reset_op, control_flow_ops.no_op,
            name="sweep_hook_reset_op")
        switch_ops = control_flow_ops.group(switch_op, reset_op,
                                            name="sweep_hook_switch_ops")

        # Op to increment the global step
        global_step = framework_variables.get_global_step()
        with ops.control_dependencies([switch_ops]):
          if global_step is not None:
            global_step_incr_op = state_ops.assign_add(
                global_step, 1, name="global_step_incr").op
          else:
            global_step_incr_op = control_flow_ops.no_op(
                name="global_step_incr")

    return [is_sweep_done, switch_ops, global_step_incr_op]
Esempio n. 44
0
def assert_near(x,
                y,
                rtol=None,
                atol=None,
                data=None,
                summarize=None,
                message=None,
                name=None):
    """Assert the condition `x` and `y` are close element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_near(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have

  ```tf.abs(x[i] - y[i]) <= atol + rtol * tf.abs(y[i])```.

  If both `x` and `y` are empty, this is trivially satisfied.

  The default `atol` and `rtol` are `10 * eps`, where `eps` is the smallest
  representable positive number such that `1 + eps != 1`.  This is about
  `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`.
  See `numpy.finfo`.

  Args:
    x:  Float or complex `Tensor`.
    y:  Float or complex `Tensor`, same `dtype` as, and broadcastable to, `x`.
    rtol:  `Tensor`.  Same `dtype` as, and broadcastable to, `x`.
      The relative tolerance.  Default is `10 * eps`.
    atol:  `Tensor`.  Same `dtype` as, and broadcastable to, `x`.
      The absolute tolerance.  Default is `10 * eps`.
    data:  The tensors to print out if the condition is False.  Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).  Defaults to "assert_near".

  Returns:
    Op that raises `InvalidArgumentError` if `x` and `y` are not close enough.

  @compatibility(numpy)
  Similar to `numpy.testing.assert_allclose`, except tolerance depends on data
  type.
  This is due to the fact that `TensorFlow` is often used with `32bit`, `64bit`,
  and even `16bit` data.
  @end_compatibility
  """
    message = message or ''
    with ops.name_scope(name, 'assert_near', [x, y, rtol, atol, data]):
        x = ops.convert_to_tensor(x, name='x')
        y = ops.convert_to_tensor(y, name='y', dtype=x.dtype)

        eps = np.finfo(x.dtype.as_numpy_dtype).eps
        rtol = 10 * eps if rtol is None else rtol
        atol = 10 * eps if atol is None else atol

        rtol = ops.convert_to_tensor(rtol, name='rtol', dtype=x.dtype)
        atol = ops.convert_to_tensor(atol, name='atol', dtype=x.dtype)

        if context.executing_eagerly():
            x_name = _shape_and_dtype_str(x)
            y_name = _shape_and_dtype_str(y)
        else:
            x_name = x.name
            y_name = y.name

        if data is None:
            data = [
                message,
                'x and y not equal to tolerance rtol = %s, atol = %s' %
                (rtol, atol),
                'x (%s) = ' % x_name, x,
                'y (%s) = ' % y_name, y
            ]
        tol = atol + rtol * math_ops.abs(y)
        diff = math_ops.abs(x - y)
        condition = math_ops.reduce_all(math_ops.less(diff, tol))
        return control_flow_ops.Assert(condition, data, summarize=summarize)
Esempio n. 45
0
def _dynamic_att_loop(decoder,
                      cell,
                      inputs,
                      initial_state,
                      context,
                      parallel_iterations,
                      swap_memory,
                      use_coverage=False,
                      sequence_length=None,
                      att_sequence_length=None,
                      dtype=None):
    """Internal implementation of Dynamic RNN.
  Args:
    decoder: Decoder object providing `attention_step`, `logit_fn`,
      `search_step`, `is_infer`, and related attributes used below.
    cell: An instance of RNNCell.
    inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
      tuple of such elements.
    initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
      `cell.state_size` is a tuple, then this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell.state_size`.
    context: Attention context (e.g. encoder states); when `use_coverage` is
      True, a tuple `(encoded_fert_init, encoded_fertility, context)`.
    parallel_iterations: Positive Python int.
    swap_memory: A Python boolean.
    use_coverage: Python boolean; whether to use coverage-based attention.
    sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
    att_sequence_length: (optional) An `int32` `Tensor` of shape [batch_size]
      giving the lengths of the attention memory sequences.
    dtype: (optional) Expected dtype of output. If not specified, inferred from
      initial_state.
  Returns:
    Tuple `(final_outputs, final_state)`.
    final_outputs:
      A `Tensor` of shape `[time, batch_size, cell.output_size]`.  If
      `cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
      the corresponding shapes.
    final_state:
      A `Tensor`, or possibly nested tuple of Tensors, matching in length
      and shapes to `initial_state`.
  Raises:
    ValueError: If the input depth cannot be inferred via shape inference
      from the inputs.
  """
    state = initial_state
    assert isinstance(parallel_iterations,
                      int), "parallel_iterations must be int"

    state_size = cell.state_size

    flat_input = nest.flatten(inputs)
    flat_output_size = nest.flatten(cell.output_size)

    # Construct an initial output
    input_shape = array_ops.shape(flat_input[0])
    time_steps = input_shape[0]
    batch_size = input_shape[1]

    is_infer = decoder.is_infer
    if use_coverage:
        encoded_fert_init, encoded_fertility, context = context
        encoded_fert_init = array_ops.identity(encoded_fert_init,
                                               name="encoded_fert_init")
    else:
        encoded_fert_init, encoded_fertility = 0, 0
        # coverage_shape = encoded_fert_init.get_shape().as_list()
        # flat_coverage = nest.flatten(encoded_fert_init)

    inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
                             for input_ in flat_input)

    const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]

    for shape in inputs_got_shape:
        if not shape[2:].is_fully_defined():
            raise ValueError(
                "Input size (depth of inputs) must be accessible via shape inference,"
                " but saw value None.")
        got_time_steps = shape[0].value
        got_batch_size = shape[1].value
        if const_time_steps != got_time_steps:
            raise ValueError(
                "Time steps is not the same for all the elements in the input in a "
                "batch.")
        if const_batch_size != got_batch_size:
            raise ValueError(
                "Batch_size is not the same for all the elements in the input."
            )

    # Prepare dynamic conditional copying of state & output
    def _create_zero_arrays(size):
        size = _concat(batch_size, size)
        return array_ops.zeros(array_ops.stack(size),
                               _infer_state_dtype(dtype, state))

    flat_zero_output = tuple(
        _create_zero_arrays(output) for output in flat_output_size)
    zero_output = nest.pack_sequence_as(structure=cell.output_size,
                                        flat_sequence=flat_zero_output)

    if sequence_length is not None:
        min_sequence_length = math_ops.reduce_min(sequence_length)
        max_sequence_length = math_ops.reduce_max(sequence_length)

    time = array_ops.constant(0, dtype=dtypes.int32, name="time")

    with ops.name_scope("dynamic_att_rnn") as scope:
        base_name = scope

    def _create_ta(steps, name, dtype):
        if not is_infer:
            return tensor_array_ops.TensorArray(
                dtype=dtype, size=steps, tensor_array_name=base_name + name)
        else:
            return tensor_array_ops.TensorArray(
                dtype=dtype, size=0, dynamic_size=True,
                tensor_array_name=base_name + name)

    output_ta = tuple(
        _create_ta(time_steps, "output_%d" % i,
                   _infer_state_dtype(dtype, state))
        for i in range(len(flat_output_size)))
    if is_infer:
        create_extra_ = lambda s, n: nest.map_structure(
            partial(_create_ta, s, n), decoder.extra_dtype)
        extra_output_ta = tuple(
            create_extra_(time_steps, "extra_output_%d" % i)
            for i in range(len(flat_output_size)))
    else:
        extra_output_ta = tuple(
            _create_ta(time_steps, "logits_%d" % i,
                       _infer_state_dtype(dtype, state))
            for i in range(len(flat_output_size)))

    input_ta = tuple(
        _create_ta(time_steps, "input_%d" % i, flat_input[i].dtype)
        for i in range(len(flat_input)))

    input_ta = tuple(
        ta.unstack(input_) for ta, input_ in zip(input_ta, flat_input))

    # coverage_ta = tuple(_create_ta(time_steps + 1, "coverage_%d" % i,
    #                                _infer_state_dtype(dtype, state))
    #                   for i in range(len(flat_coverage)))
    # if use_coverage:
    #   coverage_ta = tuple(ta.write(0, coverage_)
    #                       for ta, coverage_ in zip(coverage_ta, flat_coverage))

    def _time_step(time, input_ta_t, output_ta_t, extra_output_ta_t,
                   last_coverage, state, finished):
        """Take a time step of the dynamic RNN.
    Args:
      time: int32 scalar Tensor.
      output_ta_t: List of `TensorArray`s that represent the output.
      state: nested tuple of vector tensors that represent the state.
    Returns:
      The tuple (time + 1, output_ta_t with updated flow, new_state).
    """

        input_t = tuple(ta.read(time) for ta in input_ta_t)
        # Restore some shape information
        for input_, shape in zip(input_t, inputs_got_shape):
            input_.set_shape(shape[1:])

        input_t = nest.pack_sequence_as(structure=inputs,
                                        flat_sequence=input_t)

        rnn_state = state if not is_infer else state[0]
        if use_coverage:
            # coverage_t = tuple(ta.read(time) for ta in coverage_ta_t)
            # coverage_t = nest.pack_sequence_as(structure=encoded_fert_init, flat_sequence=coverage_t)
            # coverage_t.set_shape(coverage_shape)
            ctx, att_weights, new_coverage = decoder.attention_step(
                rnn_state, state_size, context, att_sequence_length,
                last_coverage, encoded_fertility)

            # new_coverage = nest.flatten(new_coverage)
            # coverage_ta_t = tuple(ta.write(time + 1, coverage)
            #                       for ta, coverage in zip(coverage_ta_t, new_coverage))
        else:
            new_coverage = last_coverage
            ctx, att_weights = decoder.attention_step(rnn_state,
                                                      state_size,
                                                      context,
                                                      use_coverage=False)

        call_cell = lambda: cell(array_ops.concat([input_t, ctx], 1), rnn_state)

        if sequence_length is not None:
            (output,
             new_state) = _rnn_step(time=time,
                                    sequence_length=sequence_length,
                                    min_sequence_length=min_sequence_length,
                                    max_sequence_length=max_sequence_length,
                                    zero_output=zero_output,
                                    state=rnn_state,
                                    call_cell=call_cell,
                                    state_size=state_size,
                                    skip_conditionals=True)
        else:
            (output, new_state) = call_cell()
            assert is_infer, "Manually zero output when inferring."
            output = nest.map_structure(
                lambda out, zero: array_ops.where(finished, zero, out), output,
                zero_output)

        logits = decoder.logit_fn(output)
        if is_infer:
            mix_state = (new_state, state[1])
            (new_output, extra_output, new_state, new_input_t,
             search_finished) = decoder.search_step(time, output, logits,
                                                    mix_state)

            new_input_t = nest.flatten(new_input_t)
            input_ta_t = tuple(
                ta.write(time + 1, new_input_)
                for ta, new_input_ in zip(input_ta_t, new_input_t))

            new_finished = math_ops.logical_or(search_finished, finished)
            extra_output = nest.pack_sequence_as(structure=extra_output_ta,
                                                 flat_sequence=extra_output)
            output = new_output
        else:
            extra_output = logits
            extra_output = nest.flatten(extra_output)
            new_finished = finished

        # Pack state if using state tuples
        output = nest.flatten(output)

        output_ta_t = tuple(
            ta.write(time, out) for ta, out in zip(output_ta_t, output))

        if is_infer:
            write_ta_ = lambda t, o: t.write(time, o)
            map_write_ = lambda ta, out: nest.map_structure(write_ta_, ta, out)
            extra_output_ta_t = tuple(
                map_write_(ta, out)
                for ta, out in zip(extra_output_ta_t, extra_output))
        else:
            extra_output_ta_t = tuple(
                ta.write(time, out)
                for ta, out in zip(extra_output_ta_t, extra_output))

        return (time + 1, input_ta_t, output_ta_t, extra_output_ta_t,
                new_coverage, new_state, new_finished)

    if is_infer:
        init_finished = array_ops.tile([False], [batch_size])
        init_state = (state, decoder.initial_search_state)
        condition_fn = lambda *args: math_ops.logical_not(
            math_ops.reduce_all(args[-1]))
    else:
        init_finished = False
        init_state = state
        condition_fn = lambda time, *_: time < time_steps

    res = control_flow_ops.while_loop(
        cond=condition_fn,
        body=_time_step,
        loop_vars=(time, input_ta, output_ta, extra_output_ta,
                   encoded_fert_init, init_state, init_finished),
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory)

    # Unpack final output if not using output tuples.
    final_outputs = tuple(ta.stack() for ta in res[2])
    final_extra = tuple(
        nest.map_structure(lambda ta: ta.stack(), ta) for ta in res[3])
    final_state = res[-2]

    # Restore some shape information
    if not is_infer:
        vocab_size = decoder.vocab_size
        for output, extra, output_size in zip(final_outputs, final_extra,
                                              flat_output_size):
            shape = _concat([const_time_steps, const_batch_size],
                            output_size,
                            static=True)
            output.set_shape(shape)
            extra_shape = _concat([const_time_steps, const_batch_size],
                                  vocab_size,
                                  static=True)
            extra.set_shape(extra_shape)

        final_outputs = nest.pack_sequence_as(structure=cell.output_size,
                                              flat_sequence=final_outputs)

        final_extra = nest.pack_sequence_as(structure=vocab_size,
                                            flat_sequence=final_extra)

    return (final_outputs, final_extra, final_state)
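A stripped-down, illustrative skeleton of the `while_loop` + `TensorArray` machinery the function above elaborates: unstack the time-major input, run one step per iteration, and collect outputs (all names here are examples, not the decoder's API):

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

def simple_time_major_loop(inputs, step_fn, time_steps):
    """inputs: [time, batch, depth]; step_fn: input_t -> output_t."""
    input_ta = tf.TensorArray(tf.float32, size=time_steps).unstack(inputs)
    output_ta = tf.TensorArray(tf.float32, size=time_steps)

    def body(time, out_ta):
        out_t = step_fn(input_ta.read(time))
        return time + 1, out_ta.write(time, out_t)

    _, out_ta = tf.while_loop(lambda time, _: time < time_steps,
                              body, (tf.constant(0), output_ta))
    return out_ta.stack()  # [time, batch, ...]
```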
Esempio n. 46
0
 def f(a1, a2):
   if a1.shape != a2.shape:
     return constant_op.constant(False)
   return math_ops.reduce_all(math_ops.equal(a1, a2))
Esempio n. 47
0
def _all_equal(tensor0, tensor1):
  with ops.op_scope([tensor0, tensor1], 'all_equal') as scope:
    return math_ops.reduce_all(
        math_ops.equal(tensor0, tensor1, name='equal'), name=scope)
Esempio n. 48
0
 def test(self):
     result_lt = ops.reduce_all(self.bool_lt, {'channel'})
     golden_lt = core.LabeledTensor(
         math_ops.reduce_all(self.bool_tensor, 1),
         [self.a0, self.a2, self.a3])
     self.assertLabeledTensorsEqual(result_lt, golden_lt)
Esempio n. 49
0
def _assert_all_positive(x):
    return logging_ops.Assert(
        math_ops.reduce_all(x > 0),
        ["Tensor %s should contain only positive values: " % x.name, x])
Esempio n. 50
0
    def next_inputs(self, time, outputs, state, sample_ids, name=None):
        """Gets the next inputs for next step."""
        with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                            [time, outputs, state, sample_ids]):
            (finished, base_next_inputs,
             state) = (super(ScheduledOutputTrainingHelper,
                             self).next_inputs(time=time,
                                               outputs=outputs,
                                               state=state,
                                               sample_ids=sample_ids,
                                               name=name))
            sample_ids = math_ops.cast(sample_ids, dtypes.bool)

            def maybe_sample():
                """Perform scheduled sampling."""
                def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
                    """Concatenate outputs with auxiliary inputs, if they exist."""
                    if self._auxiliary_input_tas is None:
                        return outputs_

                    next_time = time + 1
                    auxiliary_inputs = nest.map_structure(
                        lambda ta: ta.read(next_time),
                        self._auxiliary_input_tas)
                    if indices is not None:
                        auxiliary_inputs = array_ops.gather_nd(
                            auxiliary_inputs, indices)
                    return nest.map_structure(
                        lambda x, y: array_ops.concat((x, y), -1), outputs_,
                        auxiliary_inputs)

                if self._next_inputs_fn is None:
                    return array_ops.where(
                        sample_ids,
                        maybe_concatenate_auxiliary_inputs(outputs),
                        base_next_inputs)

                where_sampling = math_ops.cast(array_ops.where(sample_ids),
                                               dtypes.int32)
                where_not_sampling = math_ops.cast(
                    array_ops.where(math_ops.logical_not(sample_ids)),
                    dtypes.int32)
                outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
                inputs_not_sampling = array_ops.gather_nd(
                    base_next_inputs, where_not_sampling)
                sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
                    self._next_inputs_fn(outputs_sampling), where_sampling)

                base_shape = array_ops.shape(base_next_inputs)
                return (array_ops.scatter_nd(indices=where_sampling,
                                             updates=sampled_next_inputs,
                                             shape=base_shape) +
                        array_ops.scatter_nd(indices=where_not_sampling,
                                             updates=inputs_not_sampling,
                                             shape=base_shape))

            all_finished = math_ops.reduce_all(finished)
            no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
            next_inputs = control_flow_ops.cond(
                math_ops.logical_or(all_finished, no_samples),
                lambda: base_next_inputs, maybe_sample)
            return (finished, next_inputs, state)
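The scatter/gather recombination inside `maybe_sample` can be illustrated in isolation: rows selected by a boolean mask come from one source, the rest from another, and the two scatters sum to the full batch (a sketch with illustrative names):

```python
import tensorflow.compat.v1 as tf

def recombine_rows(mask, sampled, base):
    """mask: [batch] bool; sampled, base: [batch, depth]."""
    idx_true = tf.cast(tf.where(mask), tf.int32)
    idx_false = tf.cast(tf.where(tf.logical_not(mask)), tf.int32)
    shape = tf.shape(base)
    return (tf.scatter_nd(idx_true, tf.gather_nd(sampled, idx_true), shape) +
            tf.scatter_nd(idx_false, tf.gather_nd(base, idx_false), shape))
```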
Esempio n. 51
0
        def _update_ensemble():
            """A method to update the tree ensemble."""
            # Get next stamp token.
            next_ensemble_stamp = ensemble_stamp + 1
            # Finalize bias stats.
            _, _, _, bias_grads, bias_hess = bias_stats_accumulator.flush(
                ensemble_stamp, next_ensemble_stamp)

            # Finalize handler splits.
            are_splits_ready_list = []
            partition_ids_list = []
            gains_list = []
            split_info_list = []

            for handler in handlers:
                (are_splits_ready, partition_ids, gains,
                 split_info) = handler.make_splits(ensemble_stamp,
                                                   next_ensemble_stamp,
                                                   class_id)
                are_splits_ready_list.append(are_splits_ready)
                partition_ids_list.append(partition_ids)
                gains_list.append(gains)
                split_info_list.append(split_info)
            # Stack all the inputs to one tensor per type.
            # This is a workaround for the slowness of graph building in tf.cond.
            # See (b/36554864).
            split_sizes = array_ops.stack([
                array_ops.shape(partition_id)[0]
                for partition_id in partition_ids_list
            ])
            partition_ids = array_ops.concat(partition_ids_list, axis=0)
            gains = array_ops.concat(gains_list, axis=0)
            split_infos = array_ops.concat(split_info_list, axis=0)

            # Determine if all splits are ready.
            are_all_splits_ready = math_ops.reduce_all(
                array_ops.stack(are_splits_ready_list,
                                axis=0,
                                name="stack_handler_readiness"))

            # Define bias centering update operation.
            def _center_bias_fn():
                # Center tree ensemble bias.
                delta_updates = array_ops.where(
                    bias_hess > 0, -bias_grads / bias_hess,
                    array_ops.zeros_like(bias_grads))
                center_bias = training_ops.center_tree_ensemble_bias(
                    tree_ensemble_handle=self._ensemble_handle,
                    stamp_token=ensemble_stamp,
                    next_stamp_token=next_ensemble_stamp,
                    delta_updates=delta_updates,
                    learner_config=self._learner_config_serialized)
                return continue_centering.assign(center_bias)

            # Define ensemble growing operations.
            def _grow_ensemble_ready_fn():
                # Grow the ensemble given the current candidates.
                sizes = array_ops.unstack(split_sizes)
                partition_ids_list = list(
                    array_ops.split(partition_ids, sizes, axis=0))
                gains_list = list(array_ops.split(gains, sizes, axis=0))
                split_info_list = list(
                    array_ops.split(split_infos, sizes, axis=0))
                return training_ops.grow_tree_ensemble(
                    tree_ensemble_handle=self._ensemble_handle,
                    stamp_token=ensemble_stamp,
                    next_stamp_token=next_ensemble_stamp,
                    learning_rate=learning_rate,
                    partition_ids=partition_ids_list,
                    gains=gains_list,
                    splits=split_info_list,
                    learner_config=self._learner_config_serialized,
                    dropout_seed=dropout_seed,
                    center_bias=self._center_bias)

            def _grow_ensemble_not_ready_fn():
                # Don't grow the ensemble, just update the stamp.
                return training_ops.grow_tree_ensemble(
                    tree_ensemble_handle=self._ensemble_handle,
                    stamp_token=ensemble_stamp,
                    next_stamp_token=next_ensemble_stamp,
                    learning_rate=0,
                    partition_ids=[],
                    gains=[],
                    splits=[],
                    learner_config=self._learner_config_serialized,
                    dropout_seed=dropout_seed,
                    center_bias=self._center_bias)

            def _grow_ensemble_fn():
                # Conditionally grow an ensemble depending on whether the splits
                # from all the handlers are ready.
                return control_flow_ops.cond(are_all_splits_ready,
                                             _grow_ensemble_ready_fn,
                                             _grow_ensemble_not_ready_fn)

            # Update ensemble.
            update_ops = [are_all_splits_ready]
            update_model = control_flow_ops.cond(continue_centering,
                                                 _center_bias_fn,
                                                 _grow_ensemble_fn)
            update_ops.append(update_model)

            # Update ensemble stats.
            with ops.control_dependencies([update_model]):
                stats = training_ops.tree_ensemble_stats(
                    self._ensemble_handle, stamp_token=next_ensemble_stamp)
                update_ops.append(self._finalized_trees.assign(
                    stats.num_trees))
                update_ops.append(
                    self._attempted_trees.assign(stats.attempted_trees))
                update_ops.append(num_layers.assign(stats.num_layers))
                update_ops.append(active_tree.assign(stats.active_tree))
                update_ops.append(active_layer.assign(stats.active_layer))

            # Flush step stats.
            update_ops.extend(
                steps_accumulator.flush(ensemble_stamp, next_ensemble_stamp))
            return control_flow_ops.group(*update_ops, name="update_ensemble")
Esempio n. 52
0
def get_weights_and_check_match_logits(features,
                                       weight_column,
                                       logits,
                                       allow_per_logit_weights=False):
    """Fetches weights from features and checks that the shape matches logits.

  Consider logits of shape [D0, D1, ... DN, logits_dimension]. Weights shape
  can be either:
  * [D0, D1, ... DN, logits_dimension] if `allow_per_logit_weights=True`.
  * [D0, D1, ... DN, 1]
  * [D0, D1, ... DN]: In this case, weights is reshaped into
    [D0, D1, ... DN, 1] to work with weight broadcasting rules.

  Args:
    features: The features dict that contains weights.
    weight_column: The weight column. If not given, this method returns 1.
    logits: logits Tensor.
    allow_per_logit_weights: Boolean. Whether we allow weights along the logits
      dimension, namely shape `[D0, D1, ... DN, logits_dimension]`.

  Returns:
    Validated and reshaped weights Tensor.

  Raises:
    ValueError: If the weights `Tensor` cannot be cast into float.
  """
    if allow_per_logit_weights:
        err_msg = (
            'weights shape must be [D0, D1, ... DN], [D0, D1, ... DN, 1] or '
            '[D0, D1, ... DN, logits_dimension]')
    else:
        err_msg = (
            'weights shape must be [D0, D1, ... DN] or [D0, D1, ... DN, 1]')
    with ops.name_scope('weights',
                        values=tuple(six.itervalues(features)) +
                        (logits, )) as scope:
        # Fetch the weights.
        if weight_column is None:
            return 1.
        # TODO(b/117839674): update feature_column
        if isinstance(weight_column, six.string_types):
            weight_column = feature_column_lib.numeric_column(
                key=weight_column, shape=(1, ))
        if not isinstance(weight_column,
                          (feature_column_lib.NumericColumn, _NumericColumn)):
            raise TypeError(
                'Weight column must be either a string or NumericColumn.'
                ' Given type: {}.'.format(type(weight_column)))
        weights = weight_column._get_dense_tensor(  # pylint: disable=protected-access
            _LazyBuilder(features))
        if not (weights.dtype.is_floating or weights.dtype.is_integer):
            raise ValueError('Weight column should be castable to float. '
                             'Given dtype: {}'.format(weights.dtype))
        weights = math_ops.to_float(weights, name='weights')
        # Validate the weights shape.
        # Eager mode.
        if context.executing_eagerly():
            weights_shape = weights._shape_tuple()  # pylint: disable=protected-access
            logits_shape = logits._shape_tuple()  # pylint: disable=protected-access
            weights_rank = weights._rank()  # pylint: disable=protected-access
            logits_rank = logits._rank()  # pylint: disable=protected-access
            if (weights_rank is not None and logits_rank is not None
                    and weights_rank == logits_rank - 1):
                if logits_shape[:-1] != weights_shape:
                    raise ValueError(
                        '{}, logits_shape: {}. weights_shape: {}.'.format(
                            err_msg, logits_shape, weights_shape))
                return array_ops.expand_dims(weights, -1, name=scope)
            supported_weights_shape = logits_shape[:-1] + (1, )
            if allow_per_logit_weights:
                if (logits_shape != weights_shape
                        and supported_weights_shape != weights_shape):
                    raise ValueError(
                        '{}, logits_shape: {}. weights_shape: {}.'.format(
                            err_msg, logits_shape, weights_shape))
            else:
                if supported_weights_shape != weights_shape:
                    raise ValueError(
                        '{}, logits_shape: {}. weights_shape: {}.'.format(
                            err_msg, logits_shape, weights_shape))
            return weights

        # Graph mode.
        weights_shape = array_ops.shape(weights, name='weights_shape')
        logits_shape = array_ops.shape(logits, name='logits_shape')
        if (weights.shape.ndims is not None and logits.shape.ndims is not None
                and weights.shape.ndims == logits.shape.ndims - 1):
            assert_dimension = check_ops.assert_equal(logits_shape[:-1],
                                                      weights_shape,
                                                      message=err_msg,
                                                      data=[
                                                          'logits_shape: ',
                                                          logits_shape,
                                                          'weights_shape: ',
                                                          weights_shape
                                                      ])
            with ops.control_dependencies([assert_dimension]):
                return array_ops.expand_dims(weights, -1, name=scope)
        supported_weights_shape = array_ops.concat([logits_shape[:-1], [1]],
                                                   axis=0)
        if allow_per_logit_weights:
            condition = math_ops.reduce_any([
                math_ops.reduce_all(math_ops.equal(logits_shape,
                                                   weights_shape)),
                math_ops.reduce_all(
                    math_ops.equal(supported_weights_shape, weights_shape))
            ])
            assert_dimension = control_flow_ops.Assert(condition=condition,
                                                       data=[
                                                           err_msg,
                                                           'logits_shape: ',
                                                           logits_shape,
                                                           'weights_shape: ',
                                                           weights_shape
                                                       ])
        else:
            assert_dimension = check_ops.assert_equal(supported_weights_shape,
                                                      weights_shape,
                                                      message=err_msg,
                                                      data=[
                                                          'logits_shape: ',
                                                          logits_shape,
                                                          'weights_shape: ',
                                                          weights_shape
                                                      ])
        with ops.control_dependencies([assert_dimension]):
            return array_ops.identity(weights, name=scope)
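A minimal illustration (not the estimator API itself) of the rank-difference rule documented above: weights with one fewer dimension than logits gain a trailing axis of size 1 so ordinary broadcasting applies:

```python
import tensorflow.compat.v1 as tf

logits = tf.zeros([4, 3, 5])            # [D0, D1, logits_dimension]
weights = tf.ones([4, 3])               # [D0, D1]
weights = tf.expand_dims(weights, -1)   # [4, 3, 1]
weighted_logits = logits * weights      # broadcasts over the last axis
```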
Esempio n. 53
0
def stratified_sample(tensors,
                      labels,
                      target_probs,
                      batch_size,
                      init_probs=None,
                      enqueue_many=False,
                      queue_capacity=16,
                      threads_per_queue=1,
                      name=None):
    """Stochastically creates batches based on per-class probabilities.

  This method discards examples. Internally, it creates one queue to amortize
  the cost of disk reads, and one queue to hold the properly-proportioned
  batch.

  Args:
    tensors: List of tensors for data. All tensors are either one item or a
        batch, according to enqueue_many.
    labels: Tensor for label of data. Label is a single integer or a batch,
        depending on `enqueue_many`. It is not a one-hot vector.
    target_probs: Target class proportions in batch. An object whose type has a
        registered Tensor conversion function.
    batch_size: Size of batch to be returned.
    init_probs: Class proportions in the data. An object whose type has a
        registered Tensor conversion function, or `None` for estimating the
        initial distribution.
    enqueue_many: Bool. If true, interpret input tensors as having a batch
        dimension.
    queue_capacity: Capacity of the large queue that holds input examples.
    threads_per_queue: Number of threads for the large queue that holds input
        examples and for the final queue with the proper class proportions.
    name: Optional prefix for ops created by this function.
  Raises:
    ValueError: If `tensors` isn't iterable.
    ValueError: `enqueue_many` is True and labels doesn't have a batch
        dimension, or if `enqueue_many` is False and labels isn't a scalar.
    ValueError: `enqueue_many` is True, and batch dimension on data and labels
        don't match.
    ValueError: if probs don't sum to one.
    ValueError: if a zero initial probability class has a nonzero target
        probability.
    TFAssertion: if labels aren't integers in [0, num classes).
  Returns:
    (data_batch, label_batch), where data_batch is a list of tensors of the same
        length as `tensors`

  Example:
    # Get tensor for a single data and label example.
    data, label = data_provider.Get(['data', 'label'])

    # Get stratified batch according to per-class probabilities.
    target_probs = [...distribution you want...]
    [data_batch], labels = tf.contrib.training.stratified_sample(
        [data], label, target_probs)

    # Run batch through network.
    ...
  """
    with ops.name_scope(name, 'stratified_sample', list(tensors) + [labels]):
        tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensors)
        labels = ops.convert_to_tensor(labels)
        target_probs = ops.convert_to_tensor(target_probs,
                                             dtype=dtypes.float32)
        # Reduce the case of a single example to that of a batch of size 1.
        if not enqueue_many:
            tensor_list = [
                array_ops.expand_dims(tensor, 0) for tensor in tensor_list
            ]
            labels = array_ops.expand_dims(labels, 0)

        # If `init_probs` is `None`, set up online estimation of data distribution.
        if init_probs is None:
            # We use `target_probs` to get the number of classes, so its shape must be
            # fully defined at graph construction time.
            target_probs.get_shape().assert_is_fully_defined()
            init_probs = _estimate_data_distribution(
                labels,
                target_probs.get_shape().num_elements())
        else:
            init_probs = ops.convert_to_tensor(init_probs,
                                               dtype=dtypes.float32)

        # Validate that input is consistent.
        tensor_list, labels, [init_probs, target_probs
                              ] = _verify_input(tensor_list, labels,
                                                [init_probs, target_probs])

        # Check that all zero initial probabilities also have zero target
        # probabilities.
        assert_op = control_flow_ops.Assert(
            math_ops.reduce_all(
                math_ops.logical_or(math_ops.not_equal(init_probs, 0),
                                    math_ops.equal(target_probs, 0))),
            [
                'All classes with zero initial probability must also have zero target '
                'probability: ', init_probs, target_probs
            ])
        init_probs = control_flow_ops.with_dependencies([assert_op],
                                                        init_probs)

        # Calculate acceptance sampling probabilities.
        accept_probs = _calculate_acceptance_probabilities(
            init_probs, target_probs)
        proportion_rejected = math_ops.reduce_sum(
            (1 - accept_probs) * init_probs)
        accept_probs = control_flow_ops.cond(
            math_ops.less(proportion_rejected, .5),
            lambda: accept_probs,
            lambda: logging_ops.Print(  # pylint: disable=g-long-lambda
                accept_probs, [accept_probs],
                message='Proportion of examples rejected by sampler is high.',
                first_n=10))

        # Make a single queue to hold input examples. Reshape output so examples
        # don't have singleton batch dimension.
        batched = input_ops.batch(tensor_list + [labels],
                                  batch_size=1,
                                  num_threads=threads_per_queue,
                                  capacity=queue_capacity,
                                  enqueue_many=True)
        val_list = [array_ops.squeeze(x, [0]) for x in batched[:-1]]
        label = array_ops.squeeze(batched[-1], [0])

        # Set up second queue containing batches that have the desired class
        # proportions.
        cur_prob = array_ops.gather(accept_probs, label)
        batched = input_ops.maybe_batch(
            val_list + [label],
            keep_input=random_ops.random_uniform([]) < cur_prob,
            batch_size=batch_size,
            num_threads=threads_per_queue)
        return batched[:-1], batched[-1]
Esempio n. 54
0
def _assert_increasing(t):  # Check that time is monotonically increasing.
    assert_increasing = control_flow_ops.Assert(
        math_ops.reduce_all(t[1:] > t[:-1]),
        ['`t` must be monotonic increasing'])
    return ops.control_dependencies([assert_increasing])
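Since `_assert_increasing` returns a control-dependency context manager, a typical (sketched) use is to wrap the ops that rely on a sorted time grid; `t` and `dt` below are illustrative tensors:

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

t = tf.constant([0.0, 0.5, 1.0, 2.0])
check = tf.Assert(tf.reduce_all(t[1:] > t[:-1]),
                  ['`t` must be monotonic increasing'])
with tf.control_dependencies([check]):
    dt = t[1:] - t[:-1]  # only evaluated after the monotonicity check passes
```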
Esempio n. 55
0
def _all_equal(tensor0, tensor1):
    with ops.name_scope('all_equal', values=[tensor0, tensor1]) as scope:
        return math_ops.reduce_all(math_ops.equal(tensor0,
                                                  tensor1,
                                                  name='equal'),
                                   name=scope)
Esempio n. 56
0
 def condition(unused_time, elements_finished, *_):
     return math_ops.logical_not(math_ops.reduce_all(elements_finished))
Esempio n. 57
0
 def all_shapes_equal():
   return math_ops.reduce_all(math_ops.equal(
       array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0),
       array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))
Esempio n. 58
0
 def assertNoEqualPair(self, ls):
     for i in range(len(ls)):
         for j in range(i + 1, len(ls)):
             self.assertFalse(math_ops.reduce_all(ls[i] == ls[j]))
Esempio n. 59
0
 def initialize(self, name=None):
     with ops.name_scope(name, "MyHelperInitialize"):
         finished = math_ops.equal(0, self._sequence_length)
         all_finished = math_ops.reduce_all(finished)
         next_inputs = self._embedding_fn(self._input_tas.read(0))
         return (finished, next_inputs)
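Note that `all_finished` is computed above but never used; a common pattern (a hedged sketch with a hypothetical `zero_inputs` placeholder) gates the first read on it:

```python
import tensorflow.compat.v1 as tf

def guarded_first_inputs(all_finished, zero_inputs, read_first_fn):
    """Returns zero_inputs when every sequence is already finished."""
    return tf.cond(all_finished, lambda: zero_inputs, read_first_fn)
```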
Esempio n. 60
0
    def step(self, time, inputs, state, name=None):
        """Perform a decoding step.

    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
      name: Name scope for any created operations.

    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """
        batch_size = self._batch_size
        beam_width = self._beam_width
        end_token = self._end_token
        length_penalty_weight = self._length_penalty_weight

        with ops.name_scope(name, "BeamSearchDecoderStep",
                            (time, inputs, state)):
            cell_state = state.cell_state
            inputs = nest.map_structure(
                lambda inp: self._merge_batch_beams(inp, s=inp.shape[2:]),
                inputs)
            cell_state = nest.map_structure(self._maybe_merge_batch_beams,
                                            cell_state, self._cell.state_size)
            cell_outputs, next_cell_state = self._cell(inputs, cell_state)
            cell_outputs = nest.map_structure(
                lambda out: self._split_batch_beams(out, out.shape[1:]),
                cell_outputs)
            next_cell_state = nest.map_structure(self._maybe_split_batch_beams,
                                                 next_cell_state,
                                                 self._cell.state_size)

            if self._output_layer is not None:
                cell_outputs = self._output_layer(cell_outputs)

            mask = array_ops.one_hot(end_token,
                                     array_ops.shape(cell_outputs)[-1],
                                     dtype=dtypes.float32)
            # reduce_ratio = [0, 1e10, 6.1, 5.5, 3, 2, 1, 0.5]
            reduce_ratio = [0, 1e10]
            pred_fn_pairs = []

            def foo(i):
                return lambda: cell_outputs - mask * reduce_ratio[i]

            for i in range(1, len(reduce_ratio)):
                pred_fn_pairs.append((math_ops.equal(time, i), foo(i)))
            cell_outputs = control_flow_ops.case(pred_fn_pairs=pred_fn_pairs,
                                                 default=lambda: cell_outputs,
                                                 exclusive=True)

            beam_search_output, beam_search_state = _beam_search_step(
                time=time,
                logits=cell_outputs,
                next_cell_state=next_cell_state,
                beam_state=state,
                batch_size=batch_size,
                beam_width=beam_width,
                end_token=end_token,
                length_penalty_weight=length_penalty_weight)

            finished = beam_search_state.finished
            sample_ids = beam_search_output.predicted_ids
            next_inputs = control_flow_ops.cond(
                math_ops.reduce_all(finished), lambda: self._start_inputs,
                lambda: self._embedding_fn(sample_ids))

        return (beam_search_output, beam_search_state, next_inputs, finished)
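The one-hot masking trick in the step above can be shown on its own: subtracting a large constant from the end token's logit at selected time steps effectively forbids emitting it (`penalty` mirrors the `1e10` used above; names are illustrative):

```python
import tensorflow.compat.v1 as tf

def suppress_end_token(logits, end_token, penalty=1e10):
    """logits: [..., vocab]; end_token: scalar int id to suppress."""
    mask = tf.one_hot(end_token, tf.shape(logits)[-1], dtype=logits.dtype)
    return logits - mask * penalty
```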