def _testSameNameStacks(self, use_gpu):
  """Two stacks created with the same stack_name must get distinct handles."""
  with self.test_session(use_gpu=use_gpu):
    first = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
    push_first = gen_data_flow_ops._stack_push(first, 4.0)
    second = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
    push_second = gen_data_flow_ops._stack_push(second, 5.0)
    r = push_first + push_second
    # The second component of each evaluated handle identifies the resource.
    self.assertNotEqual(first.eval()[1], second.eval()[1])
Example #2 (0 votes)
 def _testSameNameStacks(self, use_gpu):
   """Verify that two stacks sharing stack_name="foo" receive distinct handles."""
   with self.cached_session(use_gpu=use_gpu):
     stack_a = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
     push_a = gen_data_flow_ops.stack_push(stack_a, 4.0)
     stack_b = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
     push_b = gen_data_flow_ops.stack_push(stack_b, 5.0)
     _ = push_a + push_b
     # Handles must differ even though both stacks were named "foo".
     self.assertNotEqual(self.evaluate(stack_a)[1], self.evaluate(stack_b)[1])
 def _testDuplicateStack(self, use_gpu):
   """Evaluating pushes onto two same-named stacks raises AlreadyExistsError."""
   with self.test_session(use_gpu=use_gpu):
     handle_a = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
     push_a = gen_data_flow_ops._stack_push(handle_a, 4.0)
     handle_b = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
     push_b = gen_data_flow_ops._stack_push(handle_b, 5.0)
     combined = push_a + push_b
     # Running the graph instantiates both stacks; the duplicate name fails.
     with self.assertRaises(errors.AlreadyExistsError):
       combined.eval()
Example #4 (0 votes)
 def _testDuplicateStack(self, use_gpu):
   """Creating two stacks under one stack_name must fail at run time."""
   with self.test_session(use_gpu=use_gpu):
     stack_one = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
     pushed_one = gen_data_flow_ops._stack_push(stack_one, 4.0)
     stack_two = gen_data_flow_ops._stack(tf.float32, stack_name="foo")
     pushed_two = gen_data_flow_ops._stack_push(stack_two, 5.0)
     total = pushed_one + pushed_two
     # Evaluation triggers stack creation; the name collision is the error.
     with self.assertRaises(errors.AlreadyExistsError):
       total.eval()
 def _testMultiStack(self, use_gpu):
   """Push/pop on two independently named stacks and sum the popped values."""
   with self.test_session(use_gpu=use_gpu):
     foo = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
     foo_push = gen_data_flow_ops._stack_push(foo, 4.0)
     # Pop only after the push has executed.
     with ops.control_dependencies([foo_push]):
       foo_val = gen_data_flow_ops._stack_pop(foo, dtypes.float32)
     bar = gen_data_flow_ops._stack(dtypes.float32, stack_name="bar")
     bar_push = gen_data_flow_ops._stack_push(bar, 5.0)
     with ops.control_dependencies([bar_push]):
       bar_val = gen_data_flow_ops._stack_pop(bar, dtypes.float32)
     total = foo_val + bar_val
     self.assertAllClose(9.0, total.eval())
  def _testStackWhileSwap(self, use_gpu):
    """Push a large tensor per while_loop iteration with swap_memory=True,
    then pop and accumulate in a second loop; checks the accumulated sum.
    """
    with self.test_session(use_gpu=use_gpu):
      n = constant_op.constant(0)
      h = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")

      def c(x):
        # Push loop runs while the counter is below 10, so 10 pushes happen.
        return math_ops.less(x, 10)

      def b(x):
        with ops.control_dependencies([x]):
          a = constant_op.constant(np.ones(2000), dtype=dtypes.float32)
          # swap_memory=True allows the pushed tensor to be swapped out.
          v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
        with ops.control_dependencies([v]):
          return math_ops.add(x, 1)

      r = control_flow_ops.while_loop(c, b, [n])

      v = constant_op.constant(np.zeros(2000), dtype=dtypes.float32)

      def c1(x, y):
        # Pop loop counts the counter back down to zero.
        return math_ops.greater(x, 0)

      def b1(x, y):
        nx = math_ops.subtract(x, 1)
        ny = y + gen_data_flow_ops._stack_pop(h, dtypes.float32)
        return [nx, ny]

      rx, ry = control_flow_ops.while_loop(
          c1, b1, [r, v], [r.get_shape(), tensor_shape.unknown_shape()])
      # 10 pushes of ones(2000) accumulate to 10.0 elementwise.
      self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
 def _testStackPushPop(self, use_gpu):
   """A value pushed onto a stack pops back unchanged."""
   with self.test_session(use_gpu=use_gpu):
     handle = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
     push_op = gen_data_flow_ops._stack_push(handle, [[4.0, 5.0]])
     # Make sure the push runs before the pop.
     with ops.control_dependencies([push_op]):
       popped = gen_data_flow_ops._stack_pop(handle, dtypes.float32)
     self.assertAllClose([[4.0, 5.0]], popped.eval())
 def _testPushCloseStack(self, use_gpu):
   """Closing a stack after a push completes without error."""
   with self.test_session(use_gpu=use_gpu) as sess:
     handle = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
     push_op = gen_data_flow_ops._stack_push(handle, [[4.0, 5.0]])
     # Close only after the push has run.
     with ops.control_dependencies([push_op]):
       close_op = gen_data_flow_ops._stack_close(handle)
     sess.run(close_op)
    def _testStackWhileSwap(self, use_gpu):
        """Push a large tensor per loop iteration with swap_memory=True, then
        pop and sum in a second loop; expect ones(2000) * 10.
        """
        with self.test_session(use_gpu=use_gpu):
            n = tf.constant(0)
            h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")

            def c(x):
                # Run the push loop 10 times.
                return tf.less(x, 10)

            def b(x):
                with tf.control_dependencies([x]):
                    a = tf.constant(np.ones(2000), dtype=tf.float32)
                    # swap_memory=True lets the pushed value be swapped out.
                    v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
                with tf.control_dependencies([v]):
                    return tf.add(x, 1)

            r = tf.while_loop(c, b, [n])

            v = tf.constant(np.zeros(2000), dtype=tf.float32)

            def c1(x, y):
                # Pop until the counter reaches zero.
                return tf.greater(x, 0)

            def b1(x, y):
                nx = tf.sub(x, 1)
                ny = y + gen_data_flow_ops._stack_pop(h, tf.float32)
                return [nx, ny]

            rx, ry = tf.while_loop(c1, b1, [r, v])
            # 10 pushes of ones(2000) sum to 10.0 elementwise.
            self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
    def testWhileStack_1(self):
        """Push 0..9 onto a stack in one While loop, then pop and sum them in
        a second loop; the total must be sum(range(10)) == 45.
        """
        with self.test_session():
            s = gen_data_flow_ops._stack(tf.int32, stack_name="foo")
            i = tf.constant(0)

            def c(i):
                # Push loop runs while i < 10.
                return tf.less(i, 10)

            def b(i):
                ni = tf.add(i, 1)
                # Push the current i before advancing the counter.
                ni = control_flow_ops.with_dependencies([gen_data_flow_ops._stack_push(s, i)], ni)
                return ni

            # parallel_iterations=1 keeps the pushes strictly ordered.
            r = control_flow_ops.While(c, b, [i], parallel_iterations=1)

            x = tf.constant(0)

            def c1(i, _):
                return tf.greater(i, 0)

            def b1(i, x):
                ni = tf.sub(i, 1)
                nx = x + gen_data_flow_ops._stack_pop(s, tf.int32)
                return [ni, nx]

            _, rx = control_flow_ops.While(c1, b1, [r, x], parallel_iterations=1)
            self.assertEqual(45, rx.eval())
 def _testStackPushPopSwap(self, use_gpu):
   """A push with swap_memory=True round-trips a large tensor intact."""
   with self.test_session(use_gpu=use_gpu):
     values = np.arange(2000)
     as_tensor = constant_op.constant(values, dtype=dtypes.float32)
     handle = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
     push_op = gen_data_flow_ops._stack_push(handle, as_tensor, swap_memory=True)
     # Pop only after the push has executed.
     with ops.control_dependencies([push_op]):
       popped = gen_data_flow_ops._stack_pop(handle, dtypes.float32)
     self.assertAllClose(values, popped.eval())
  def testWhileStack_1(self):
    """Push 0..9 onto a stack in one While loop, pop and sum in a second."""
    with self.test_session():
      stack = gen_data_flow_ops._stack(tf.int32, stack_name="foo")
      start = tf.constant(0)

      def push_cond(i):
        return tf.less(i, 10)

      def push_body(i):
        incremented = tf.add(i, 1)
        # Force the push of the current counter before advancing it.
        return control_flow_ops.with_dependencies(
            [gen_data_flow_ops._stack_push(stack, i)], incremented)

      count = control_flow_ops.While(
          push_cond, push_body, [start], parallel_iterations=1)

      acc = tf.constant(0)

      def pop_cond(i, _):
        return tf.greater(i, 0)

      def pop_body(i, total):
        return [tf.sub(i, 1),
                total + gen_data_flow_ops._stack_pop(stack, tf.int32)]

      _, result = control_flow_ops.While(
          pop_cond, pop_body, [count, acc], parallel_iterations=1)
      # sum(range(10)) == 45
      self.assertEqual(45, result.eval())
Example #13 (0 votes)
  def _testStackWhileSwap(self, use_gpu):
    """Push ones(2000) per iteration with swap_memory=True, then pop and
    accumulate in a second While loop; expect ones(2000) * 10.
    """
    with self.test_session(use_gpu=use_gpu):
      n = tf.constant(0)
      h = gen_data_flow_ops._stack(tf.float32, stack_name="foo")

      def c(x):
        # Push loop runs 10 times.
        return tf.less(x, 10)
      def b(x):
        with tf.control_dependencies([x]):
          a = tf.constant(np.ones(2000), dtype=tf.float32)
          # swap_memory=True allows swapping the pushed tensor out of device memory.
          v = gen_data_flow_ops._stack_push(h, a, swap_memory=True)
        with tf.control_dependencies([v]):
          return tf.add(x, 1)
      r = control_flow_ops.While(c, b, [n])

      v = tf.constant(np.zeros(2000), dtype=tf.float32)
      def c1(x, y):
        # Pop until the counter reaches zero.
        return tf.greater(x, 0)
      def b1(x, y):
        nx = tf.sub(x, 1)
        ny = y + gen_data_flow_ops._stack_pop(h, tf.float32)
        return [nx, ny]
      rx, ry = control_flow_ops.While(c1, b1, [r, v])
      # 10 pushes of ones(2000) sum to 10.0 elementwise.
      self.assertAllClose(np.ones(2000) * 10.0, ry.eval())
 def _testCloseStack(self, use_gpu):
   """Closing a freshly created, empty stack runs cleanly."""
   with self.test_session(use_gpu=use_gpu) as sess:
     handle = gen_data_flow_ops._stack(dtypes.float32, stack_name="foo")
     close_op = gen_data_flow_ops._stack_close(handle)
     sess.run(close_op)