Example #1
def testDestroyTemporaryVariableTwice(self):
  with self.test_session(use_gpu=True):
    var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
    # Both destroy ops target the same var_name; when evaluated, the second
    # destroy finds nothing left and raises NotFoundError.
    val1 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
    val2 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
    final = val1 + val2
    with self.assertRaises(errors.NotFoundError):
      final.eval()
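
For orientation, a minimal standalone sketch of the temporary-variable lifecycle these tests exercise, assuming a TF 1.x environment where gen_state_ops is importable; the session setup and the var_name "scratch" are illustrative, not taken from the original tests.

import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_state_ops

with tf.Session() as sess:
  # Create a temporary variable registered under an explicit var_name.
  var = gen_state_ops._temporary_variable([1, 2], dtypes.float32,
                                          var_name="scratch")
  var = tf.assign(var, [[1.0, 2.0]])
  # Destroying returns the final value and frees the backing buffer; a
  # second destroy under the same name would raise NotFoundError.
  final = gen_state_ops._destroy_temporary_variable(var, var_name="scratch")
  print(sess.run(final))  # => [[1. 2.]]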
Example #3
def testDestroyNonexistentTemporaryVariable(self):
  with self.test_session(use_gpu=True):
    var = gen_state_ops._temporary_variable([1, 2], tf.float32)
    # No temporary variable is registered under "bad", so the destroy fails.
    final = gen_state_ops._destroy_temporary_variable(var, var_name="bad")
    with self.assertRaises(errors.NotFoundError):
      final.eval()
Example #4
def testTemporaryVariable(self):
  with self.test_session(use_gpu=True):
    var = gen_state_ops._temporary_variable([1, 2], tf.float32,
                                            var_name="foo")
    var = tf.assign(var, [[4.0, 5.0]])
    var = tf.assign_add(var, [[6.0, 7.0]])
    final = gen_state_ops._destroy_temporary_variable(var, var_name="foo")
    self.assertAllClose([[10.0, 12.0]], final.eval())
Example #5
def _AccumulateNTemplate(self, inputs, init, shape, validate_shape):
  var = gen_state_ops._temporary_variable(
      shape=shape, dtype=inputs[0].dtype.base_dtype)
  ref = tf.assign(var, init, validate_shape=validate_shape)
  update_ops = [tf.assign_add(ref, tensor, use_locking=True).op
                for tensor in inputs]
  with tf.control_dependencies(update_ops):
    return gen_state_ops._destroy_temporary_variable(
        ref, var_name=var.op.name)
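
A hypothetical standalone use of the same pattern; the accumulate helper and the session setup below are illustrative assumptions, not part of the original test.

import tensorflow as tf
from tensorflow.python.ops import gen_state_ops

def accumulate(tensors):
  # Sum `tensors` into one temporary variable (zero initializer), then
  # destroy it to release the buffer and return the accumulated value.
  var = gen_state_ops._temporary_variable(
      shape=tensors[0].get_shape(), dtype=tensors[0].dtype.base_dtype)
  ref = tf.assign(var, tf.zeros_like(tensors[0]))
  update_ops = [tf.assign_add(ref, t, use_locking=True).op for t in tensors]
  with tf.control_dependencies(update_ops):
    return gen_state_ops._destroy_temporary_variable(
        ref, var_name=var.op.name)

with tf.Session() as sess:
  a = tf.constant([[1.0, 2.0]])
  print(sess.run(accumulate([a, a, a])))  # => [[3. 6.]]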
Example #8
def _thin_stack_lookup_gradient(op, grad_stack1, grad_stack2, grad_buf_top, _):
    # Accumulate incoming gradients for the stack and buffer in temporary
    # variables; destroying them at the end releases the buffers and yields
    # the final gradient values.
    stack, buffer, _, _, buffer_cursors, transitions = op.inputs

    stack2_ptrs = op.outputs[3]
    t = op.get_attr("timestep")

    batch_size = buffer_cursors.get_shape().as_list()[0]
    # Floor division keeps this an int under Python 3.
    num_tokens = buffer.get_shape().as_list()[0] // batch_size
    batch_range = math_ops.range(batch_size)
    batch_range_i = tf.to_float(batch_range)

    grad_stack_name = "grad_stack_%i_%s" % (t, str(uuid.uuid4())[:15])
    grad_buffer_name = "grad_buffer_%i_%s" % (t, str(uuid.uuid4())[:15])
    grad_stack = gen_state_ops._temporary_variable(stack.get_shape().as_list(), tf.float32, grad_stack_name)
    grad_buffer = gen_state_ops._temporary_variable(buffer.get_shape().as_list(), tf.float32, grad_buffer_name)
    grad_stack = tf.assign(grad_stack, tf.zeros_like(grad_stack))
    grad_buffer = tf.assign(grad_buffer, tf.zeros_like(grad_buffer))

    # Write grad_stack1 into block (t - 1)
    if t >= 1:
      in_cursors = (t - 1) * batch_size + batch_range
      grad_stack = tf.scatter_add(grad_stack, in_cursors, grad_stack1)

    # Write grad_stack2 using stored lookup pointers
    grad_stack = floaty_scatter_add(grad_stack, stack2_ptrs * batch_size + batch_range_i, grad_stack2)

    # Use buffer_cursors to scatter grads into buffer.
    buffer_ptrs = tf.minimum(float(num_tokens * batch_size) - 1.0,
                             buffer_cursors * batch_size + batch_range_i)
    grad_buffer = floaty_scatter_add(grad_buffer, buffer_ptrs, grad_buf_top)

    with tf.control_dependencies([grad_stack, grad_buffer]):
      grad_stack = gen_state_ops._destroy_temporary_variable(grad_stack, grad_stack_name)
      grad_buffer = gen_state_ops._destroy_temporary_variable(grad_buffer, grad_buffer_name)

      with tf.control_dependencies([grad_stack, grad_buffer]):
        return grad_stack, grad_buffer, None, None, None, None
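
For completeness: a hand-written gradient like this is attached to its op with ops.RegisterGradient. The op name "ThinStackLookup" below is an assumption for illustration (the registration is not shown in the snippet, and floaty_scatter_add is likewise a project-specific helper, not a TensorFlow API).

from tensorflow.python.framework import ops

# Hypothetical registration; the real op name may differ.
ops.RegisterGradient("ThinStackLookup")(_thin_stack_lookup_gradient)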
Example #9
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking;
  otherwise, these are inferred.

  For example:

  ```python
  # tensor `a` is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have the same shape and dtype, or if
      the shape cannot be inferred.
  """
  if tensor_dtype is None:
    if not inputs or not isinstance(inputs, (list, tuple)):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
    if not all(isinstance(x, ops.Tensor) for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    if not all(x.dtype == inputs[0].dtype for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    tensor_dtype = inputs[0].dtype
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
    for input_tensor in inputs:
      if isinstance(input_tensor, ops.Tensor):
        shape = shape.merge_with(input_tensor.get_shape())
  if not shape.is_fully_defined():
    # TODO(pbar): Make a version of assign_add that accepts an uninitialized
    # lvalue, and takes its shape from that? This would allow accumulate_n to
    # work in all situations that add_n currently works.
    raise ValueError("Cannot infer the shape of the accumulator for "
                     "accumulate_n. Pass the shape argument, or set the shape "
                     "of at least one of the inputs.")
  with ops.op_scope(inputs, name, "AccumulateN") as name:
    var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
    var_name = var.op.name
    var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
    update_ops = []
    for input_tensor in inputs:
      op = state_ops.assign_add(var, input_tensor, use_locking=True)
      update_ops.append(op)
    with ops.control_dependencies(update_ops):
      return gen_state_ops._destroy_temporary_variable(var,
                                                       var_name=var_name,
                                                       name=name)
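
A minimal usage sketch, assuming a TF 1.x graph and session; the values match the docstring example above.

import tensorflow as tf

a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 0], [0, 6]])
with tf.Session() as sess:
  print(sess.run(tf.accumulate_n([a, b, a])))  # => [[7 4] [6 14]]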