Example #1
 def fn():
   # `v` is a float32 placeholder captured from the enclosing test scope
   # (see the complete testMultiStack in Example #3 below).
   h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
   c1 = gen_data_flow_ops.stack_push_v2(h1, v)
   with ops.control_dependencies([c1]):
     c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
   h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="bar")
   c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
   with ops.control_dependencies([c2]):
     c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
   return c1 + c2
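This fragment is the graph-building body of the complete testMultiStack test shown in Example #3 below; `v` is fed when the sum is evaluated. A minimal sketch of driving it, mirroring that test's assertion (the surrounding session setup is assumed):

 # Sketch only: evaluates the two-stack graph built by fn() above.
 r = fn()
 self.assertAllClose(9.0, r.eval({v: 4.0}))  # 4.0 popped from h1 + 5.0 from h2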
Example #2
      def fn():
        # `v1` and `v2` are float32 placeholders captured from the enclosing
        # test scope (see the complete testSameNameStacks in Example #17).
        h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
        h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")

        c1 = gen_data_flow_ops.stack_push_v2(h1, v1)
        with ops.control_dependencies([c1]):
          c2 = gen_data_flow_ops.stack_push_v2(h2, v2)
        with ops.control_dependencies([c2]):
          pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
          pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
        return [pop1, pop2]
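This is the body of the testSameNameStacks test in Example #17 below: two stacks share the name "foo" yet remain independent resources. A sketch of the corresponding check, mirroring that test (session and placeholders assumed):

      # Sketch only: each pop returns the value pushed to its own handle.
      out1, out2 = sess.run(fn(), {v1: 4.0, v2: 5.0})
      # out1 == 4.0, out2 == 5.0: same stack_name, distinct stacks.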
Example #3
 def testMultiStack(self):
   with self.test_session(), self.test_scope():
     v = array_ops.placeholder(dtypes.float32)
     h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
     c1 = gen_data_flow_ops.stack_push_v2(h1, v)
     with ops.control_dependencies([c1]):
       c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
     h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="bar")
     c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
     with ops.control_dependencies([c2]):
       c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
     r = c1 + c2
     self.assertAllClose(9.0, r.eval({v: 4.0}))
Example #4
 def _testMultiStack(self, use_gpu):
   with self.test_session(use_gpu=use_gpu):
      # A negative max_size (-1 here) makes the stack's size unbounded.
      h1 = gen_data_flow_ops.stack_v2(
          -1, elem_type=dtypes.float32, stack_name="foo")
     c1 = gen_data_flow_ops.stack_push_v2(h1, 4.0)
     with ops.control_dependencies([c1]):
       c1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
     h2 = gen_data_flow_ops.stack_v2(
         -1, elem_type=dtypes.float32, stack_name="bar")
     c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
     with ops.control_dependencies([c2]):
       c2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)
     r = c1 + c2
     self.assertAllClose(9.0, r.eval())
Example #5
 def fn():
     # `v` is a placeholder captured from the enclosing test scope; the close
     # op built below deletes the stack from its resource container.
     h = gen_data_flow_ops.stack_v2(5,
                                    dtypes.float32,
                                    stack_name="foo")
     c = gen_data_flow_ops.stack_push_v2(h, v)
     with ops.control_dependencies([c]):
         gen_data_flow_ops.stack_close_v2(h)
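The control dependency orders the close after the push, but this fragment discards the close op's handle; to actually run it, keep a reference as testPushCloseStack in Example #8 does. A sketch, assuming the enclosing session:

     # Sketch only: mirrors Example #8.
     c1 = gen_data_flow_ops.stack_close_v2(h)
     sess.run(c1, {v: [[4.0, 5.0]]})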
Example #6
 def b(x):
     # Loop-body fragment: `h` is a stack handle created outside the body;
     # swap_memory=True lets the pushed element be swapped out to CPU memory.
     with ops.control_dependencies([x]):
         a = constant_op.constant(np.ones(2000),
                                  dtype=dtypes.float32)
         v = gen_data_flow_ops.stack_push_v2(h, a, swap_memory=True)
     with ops.control_dependencies([v]):
         return math_ops.add(x, 1)
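`b` is a loop-body fragment: each iteration pushes a 2000-element tensor onto the stack `h` with swap_memory=True before incrementing the counter. A minimal harness for such a body, assuming TF 1.x graph mode; the handle, loop bound, and imports below are illustrative, not from the source:

 # Illustrative harness (assumed names and loop bound).
 from tensorflow.python.ops import control_flow_ops

 h = gen_data_flow_ops.stack_v2(-1, elem_type=dtypes.float32, stack_name="foo")
 def c(x):
     return math_ops.less(x, 10)  # run ten iterations
 r = control_flow_ops.while_loop(c, b, [constant_op.constant(0)])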
Example #7
 def fn():
     # `x` is a placeholder captured from the enclosing test scope
     # (see the complete testStackPushPopSwap in Example #15 below).
     h = gen_data_flow_ops.stack_v2(5,
                                    dtypes.float32,
                                    stack_name="foo")
     c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
     with ops.control_dependencies([c]):
         return gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
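Like Example #1, this is a test-body fragment. The complete test appears as testStackPushPopSwap in Example #15; its assertion round-trips a 2000-element array through the stack:

 # Sketch only: mirrors Example #15.
 a = np.arange(2000)
 self.assertAllClose(a, fn().eval({x: a}))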
Example #8
 def testPushCloseStack(self):
   with self.test_session() as sess, self.test_scope():
     v = array_ops.placeholder(dtypes.float32)
     h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
     c = gen_data_flow_ops.stack_push_v2(h, v)
     with ops.control_dependencies([c]):
       c1 = gen_data_flow_ops.stack_close_v2(h)
     sess.run(c1, {v: [[4.0, 5.0]]})
Example #9
 def _testStackPushPop(self, use_gpu):
   with self.test_session(use_gpu=use_gpu):
     h = gen_data_flow_ops.stack_v2(
         -1, elem_type=dtypes.float32, stack_name="foo")
     c = gen_data_flow_ops.stack_push_v2(h, [[4.0, 5.0]])
     with ops.control_dependencies([c]):
       c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
     self.assertAllClose([[4.0, 5.0]], c1.eval())
Example #11
 def _testPushCloseStack(self, use_gpu):
   with self.test_session(use_gpu=use_gpu) as sess:
     h = gen_data_flow_ops.stack_v2(
         -1, elem_type=dtypes.float32, stack_name="foo")
     c = gen_data_flow_ops.stack_push_v2(h, [[4.0, 5.0]])
     with ops.control_dependencies([c]):
       c1 = gen_data_flow_ops.stack_close_v2(h)
     sess.run(c1)
Example #13
 def fn():
     # `v` is a placeholder captured from the enclosing test scope
     # (see the complete testStackPushPop in Example #16 below).
     h = gen_data_flow_ops.stack_v2(5,
                                    dtypes.float32,
                                    stack_name="foo")
     c = gen_data_flow_ops.stack_push_v2(h, v)
     with ops.control_dependencies([c]):
         c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
     return c1
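This fragment is the push-then-pop core of the full tests below (e.g. testStackPushPop in Example #16); the control dependency guarantees the pop runs after the push. A sketch of its assertion, mirroring that test:

 # Sketch only: the popped value equals the value fed to the push.
 self.assertAllClose([[4.0, 5.0]], fn().eval({v: [[4.0, 5.0]]}))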
Example #14
  def _testSameNameStacks(self, use_gpu):
    """Different stacks with the same name do not interfere."""
    with self.test_session(use_gpu=use_gpu) as sess:
      h1 = gen_data_flow_ops.stack_v2(
          -1, elem_type=dtypes.float32, stack_name="foo")
      h2 = gen_data_flow_ops.stack_v2(
          -1, elem_type=dtypes.float32, stack_name="foo")

      c1 = gen_data_flow_ops.stack_push_v2(h1, 4.0)
      with ops.control_dependencies([c1]):
        c2 = gen_data_flow_ops.stack_push_v2(h2, 5.0)
      with ops.control_dependencies([c2]):
        pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
        pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)

      out1, out2 = sess.run([pop1, pop2])
      self.assertAllClose(out1, 4.0)
      self.assertAllClose(out2, 5.0)
Example #15
 def testStackPushPopSwap(self):
     with self.test_session(), self.test_scope():
         a = np.arange(2000)
         x = array_ops.placeholder(dtypes.float32)
         h = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
         c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
         with ops.control_dependencies([c]):
             c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
         self.assertAllClose(a, c1.eval({x: a}))
Example #16
 def testStackPushPop(self):
   with self.test_session(), self.test_scope():
     size = array_ops.placeholder(dtypes.int32)
     v = array_ops.placeholder(dtypes.float32)
     h = gen_data_flow_ops.stack_v2(size, dtypes.float32, stack_name="foo")
     c = gen_data_flow_ops.stack_push_v2(h, v)
     with ops.control_dependencies([c]):
       c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
     self.assertAllClose([[4.0, 5.0]], c1.eval({size: 5, v: [[4.0, 5.0]]}))
Example #17
  def testSameNameStacks(self):
    """Different stacks with the same name do not interfere."""
    with self.test_session() as sess, self.test_scope():
      v1 = array_ops.placeholder(dtypes.float32)
      v2 = array_ops.placeholder(dtypes.float32)
      h1 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")
      h2 = gen_data_flow_ops.stack_v2(5, dtypes.float32, stack_name="foo")

      c1 = gen_data_flow_ops.stack_push_v2(h1, v1)
      with ops.control_dependencies([c1]):
        c2 = gen_data_flow_ops.stack_push_v2(h2, v2)
      with ops.control_dependencies([c2]):
        pop1 = gen_data_flow_ops.stack_pop_v2(h1, dtypes.float32)
        pop2 = gen_data_flow_ops.stack_pop_v2(h2, dtypes.float32)

      out1, out2 = sess.run([pop1, pop2], {v1: 4.0, v2: 5.0})
      self.assertAllClose(out1, 4.0)
      self.assertAllClose(out2, 5.0)
Example #19
 def _testStackPushPopSwap(self, use_gpu):
   with self.test_session(use_gpu=use_gpu):
     a = np.arange(2000)
     x = constant_op.constant(a, dtype=dtypes.float32)
     h = gen_data_flow_ops.stack_v2(
         -1, elem_type=dtypes.float32, stack_name="foo")
     c = gen_data_flow_ops.stack_push_v2(h, x, swap_memory=True)
     with ops.control_dependencies([c]):
       c1 = gen_data_flow_ops.stack_pop_v2(h, dtypes.float32)
     self.assertAllClose(a, c1.eval())
Example #23
def AddForwardAccumulator(self, value, dead_branch=False):
    """Add an accumulator for each forward tensor that is needed in backprop.

    This is added to the forward loop at the first time when a tensor
    in the forward loop is used by the backprop gradient computation loop.
    We create an accumulator that accumulates the value of tensor at each
    iteration. Called in the control flow context where gradients() is called.

    The pseudocode is:
    ```
      acc = stack();
      while (_pivot) {
        acc = stack_push(acc, value);
      }
    ```

    We make sure that the stack push op in one iteration is executed before
    the next iteration. This is achieved by adding a control edge from
    `forward_index.op.inputs[0].op` to the push op, and another control
    edge from the push op to either `forward_index.op` or `forward_sync`.

    Args:
      value: The source tensor in forward that is to be accumulated.
      dead_branch: True iff the tensor is on a dead branch of a cond.

    Returns:
      The stack that contains the accumulated history of the tensor.

    Raises:
      TypeError: For internal errors involving the value condition context.
      ValueError: If `value` is inside an XLA scope and a valid max size
        for the stack can't be found.
    """
    # curr_ctxt is the context that tf.gradients was called in.
    with self._forward_index.graph.as_default():
        curr_ctxt = ops.get_default_graph()._get_control_flow_context()  # pylint: disable=protected-access
        with ops.control_dependencies(None):
            if curr_ctxt:
                curr_ctxt.Enter()
            with ops.colocate_with(value):
                # We only need to pass maximum_iterations to the stack if
                # we're inside an XLA context.
                if not util.IsInXLAContext(value.op):
                    max_size = constant_op.constant(-1, dtypes.int32)
                else:
                    max_size = _GetMaxSizeFromNestedMaximumIterations(
                        value, self.forward_context)
                acc = gen_data_flow_ops.stack_v2(
                    max_size=max_size,
                    elem_type=value.dtype.base_dtype,
                    name="f_acc")
            if curr_ctxt:
                curr_ctxt.Exit()

            # Make acc available in the forward context.
            enter_acc = self.forward_context.AddValue(acc)

            # Add the stack_push op in the context of value.op.
            swap_enabled = self.forward_context.swap_memory
            value_ctxt = util.GetOutputContext(value.op)
            if value_ctxt == self.forward_context:
                # value is not nested in the forward context.
                self.forward_context.Enter()
                push = gen_data_flow_ops.stack_push_v2(
                    enter_acc, value, swap_memory=swap_enabled)
                self.forward_context.Exit()
                # Protect stack push and order it before forward_index.
                self.forward_index.op._add_control_input(push.op)
            else:
                # value is in a cond context within the forward context.
                if not isinstance(value_ctxt, control_flow_ops.CondContext):
                    raise TypeError(
                        "value_ctxt is not a CondContext: %s" % value_ctxt)
                if dead_branch:
                    # The special case for creating a zero tensor for a dead
                    # branch of a switch. See _ControlFlowState.ZerosLike().
                    value_ctxt.outer_context.Enter()
                    push = gen_data_flow_ops.stack_push_v2(
                        enter_acc, value, swap_memory=swap_enabled)
                    value_ctxt.outer_context.Exit()
                    push.op._set_control_flow_context(value_ctxt)
                else:
                    value_ctxt.Enter()
                    push = gen_data_flow_ops.stack_push_v2(
                        enter_acc, value, swap_memory=swap_enabled)
                    value_ctxt.Exit()
                # Protect stack push and order it before forward_sync.
                self.forward_sync._add_control_input(push.op)
            # Order the stack push after the successor of forward_index.
            add_op = self.forward_index.op.inputs[0].op
            push.op._add_control_input(add_op)
            return acc
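The docstring's pseudocode amounts to one stack push per forward iteration, with control edges ordering iteration i's push before iteration i+1. A self-contained sketch of that pattern in TF 1.x graph mode (the names, cast, and loop bound here are illustrative, not the method's actual wiring):

# Illustrative only: accumulate a per-iteration value on a stack so a later
# (backprop) loop can pop the history in reverse order.
from tensorflow.python.framework import constant_op, dtypes, ops
from tensorflow.python.ops import control_flow_ops, gen_data_flow_ops, math_ops

acc = gen_data_flow_ops.stack_v2(
    max_size=-1, elem_type=dtypes.float32, stack_name="f_acc")

def body(i):
    push = gen_data_flow_ops.stack_push_v2(acc, math_ops.cast(i, dtypes.float32))
    with ops.control_dependencies([push]):  # order the push before the next step
        return i + 1

_ = control_flow_ops.while_loop(lambda i: i < 5, body, [constant_op.constant(0)])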