Example #1
  def testConst(self):
    np.random.seed(7)
    with self.test_session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
        data = np.random.randn(*shape).astype(np.float32)
        # Pack back into a single tensorflow tensor directly using np array
        c = array_ops.stack(data)
        # This is implemented via a Const:
        self.assertEqual(c.op.type, "Const")
        self.assertAllEqual(c.eval(), data)

        c = array_ops.parallel_stack(data)
        self.assertAllEqual(c.eval(), data)

        # Python lists also work for 1-D case:
        if len(shape) == 1:
          data_list = list(data)
          cl = array_ops.stack(data_list)
          self.assertEqual(cl.op.type, "Const")
          self.assertAllEqual(cl.eval(), data)

          cl = array_ops.parallel_stack(data_list)
          self.assertAllEqual(cl.eval(), data)

      # Verify that shape induction works with shapes produced via const stack
      a = constant_op.constant([1, 2, 3, 4, 5, 6])
      b = array_ops.reshape(a, array_ops.stack([2, 3]))
      self.assertAllEqual(b.get_shape(), [2, 3])
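All of these snippets are methods of a TensorFlow test case and rely on the framework's internal test modules. A minimal sketch of the harness they assume is below; the module paths follow TensorFlow's internal layout, but the class name and layout here are reconstructed assumptions, not the verbatim test file:

# Sketch of the surrounding harness, reconstructed from the names the
# snippets use (np, constant_op, ops, test_util, array_ops).
import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class StackOpTest(test.TestCase):
  # The test methods from the examples plug in here.
  pass


if __name__ == "__main__":
  test.main()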
Example #2
    def testConst(self):
        np.random.seed(7)
        with self.test_session(use_gpu=True):
            for shape in (2, ), (3, ), (2, 3), (3, 2), (4, 3, 2):
                data = np.random.randn(*shape).astype(np.float32)
                # Pack back into a single tensorflow tensor directly using np array
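                # Note: array_ops.pack is the pre-1.0 name of array_ops.stack.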
                c = array_ops.pack(data)
                # This is implemented via a Const:
                self.assertEqual(c.op.type, "Const")
                self.assertAllEqual(c.eval(), data)

                c = array_ops.parallel_stack(data)
                self.assertAllEqual(c.eval(), data)

                # Python lists also work for 1-D case:
                if len(shape) == 1:
                    data_list = list(data)
                    cl = array_ops.pack(data_list)
                    self.assertEqual(cl.op.type, "Const")
                    self.assertAllEqual(cl.eval(), data)

                    cl = array_ops.stack(data_list)
                    self.assertEqual(cl.op.type, "Const")
                    self.assertAllEqual(cl.eval(), data)

                    cl = array_ops.parallel_stack(data_list)
                    self.assertAllEqual(cl.eval(), data)

            # Verify that shape induction works with shapes produced via const pack
            a = constant_op.constant([1, 2, 3, 4, 5, 6])
            b = array_ops.reshape(a, array_ops.pack([2, 3]))
            self.assertAllEqual(b.get_shape(), [2, 3])

            b = array_ops.reshape(a, array_ops.stack([2, 3]))
            self.assertAllEqual(b.get_shape(), [2, 3])
Example #3
  def testConstParallelGPU(self):
    np.random.seed(7)
    with self.session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
        data = np.random.randn(*shape).astype(np.float32)
        if len(shape) == 1:
          data_list = list(data)
          cl = array_ops.parallel_stack(data_list)
          self.assertAllEqual(cl.eval(), data)

        data = np.random.randn(*shape).astype(np.float32)
        c = array_ops.parallel_stack(data)
        self.assertAllEqual(c.eval(), data)
Example #4
    def testConstParallelCPU(self):
        np.random.seed(7)
        with self.session(use_gpu=False):
            for shape in (2, ), (3, ), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
                with self.subTest(shape=shape):
                    data = self.randn(shape, np.float32)
                    if len(shape) == 1:
                        data_list = list(data)
                        cl = array_ops.parallel_stack(data_list)
                        self.assertAllEqual(cl, data)

                    data = self.randn(shape, np.float32)
                    c = array_ops.parallel_stack(data)
                    self.assertAllEqual(c, data)
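Examples #4, #5, #11, and #12 call a self.randn helper defined on the test class. A plausible sketch of it, matching the np.random.randn(*shape).astype(...) pattern the other examples use inline (the bool branch is an assumption):

    def randn(self, shape, dtype):
        # Random data of the requested shape, cast to the requested dtype.
        data = np.random.randn(*shape)
        if dtype == np.bool_:
            return data > 0  # astype would map almost every float to True
        return data.astype(dtype)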
Example #5
    def testConstParallelGPU(self):
        # tf.parallel_stack is only supported in graph mode.
        with ops.Graph().as_default():
            np.random.seed(7)
            with test_util.device(use_gpu=True):
                for shape in (2, ), (3, ), (2, 3), (3, 2), (4, 3, 2):
                    with self.subTest(shape=shape):
                        data = self.randn(shape, np.float32)
                        if len(shape) == 1:
                            data_list = list(data)
                            cl = array_ops.parallel_stack(data_list)
                            self.assertAllEqual(cl, data)

                        data = self.randn(shape, np.float32)
                        c = array_ops.parallel_stack(data)
                        self.assertAllEqual(c, data)
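The ops.Graph().as_default() wrapper is needed because parallel_stack builds a graph op and raises under eager execution. The same idea as a standalone sketch with the public API (assuming TF 2.x with tf.compat.v1 available):

import tensorflow as tf

# tf.parallel_stack only runs in graph mode, so build an explicit graph.
with tf.Graph().as_default():
    xs = [tf.constant([1, 2, 3]), tf.constant([4, 5, 6])]
    stacked = tf.parallel_stack(xs)  # always stacks along axis 0
    with tf.compat.v1.Session() as sess:
        print(sess.run(stacked))  # [[1 2 3], [4 5 6]]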
Example #6
  def testSimpleParallelGPU(self):
    np.random.seed(7)
    with self.session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
        data = np.random.randn(*shape).astype(np.float32)
        xs = list(map(constant_op.constant, data))
        c = array_ops.parallel_stack(xs)
        self.assertAllEqual(c.eval(), data)
Example #7
  def testAxis0Default(self):
    with self.test_session(use_gpu=True):
      t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
      stacked = array_ops.stack(t).eval()
      parallel_stacked = array_ops.parallel_stack(t).eval()

    self.assertAllEqual(stacked, np.array([[1, 2, 3], [4, 5, 6]]))
    self.assertAllEqual(parallel_stacked, np.array([[1, 2, 3], [4, 5, 6]]))
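These axis-0 tests compare stack and parallel_stack directly because parallel_stack takes no axis argument: it always stacks along the first dimension. For contrast, a short public-API sketch of stack with a non-default axis:

import tensorflow as tf

t = [tf.constant([1, 2, 3]), tf.constant([4, 5, 6])]
tf.stack(t)          # default axis=0: shape (2, 3) -> [[1, 2, 3], [4, 5, 6]]
tf.stack(t, axis=1)  # shape (3, 2) -> [[1, 4], [2, 5], [3, 6]]
# tf.parallel_stack(t) accepts no axis parameter; it is always axis 0.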
Example #8
  def testAxis0DefaultGPU(self):
    with self.session(use_gpu=True):
      t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
      stacked = array_ops.stack(t).eval()
      parallel_stacked = array_ops.parallel_stack(t).eval()

    expected = np.array([[1, 2, 3], [4, 5, 6]])
    self.assertAllEqual(stacked, expected)
    self.assertAllEqual(parallel_stacked, expected)
Example #9
  def testAxis0DefaultCPU(self):
    with self.session(use_gpu=False):
      t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
      stacked = array_ops.stack(t).eval()
      parallel_stacked = array_ops.parallel_stack(t).eval()

    expected = np.array([[1, 2, 3], [4, 5, 6]])
    self.assertAllEqual(stacked, expected)
    self.assertAllEqual(parallel_stacked, expected)
Example #10
  def testZeroSizeGPU(self):
    # Verify that stack doesn't crash for zero size inputs
    with self.session(use_gpu=True):
      for shape in (0,), (3, 0), (0, 3):
        x = np.zeros((2,) + shape).astype(np.int32)
        p = array_ops.stack(list(x)).eval()
        self.assertAllEqual(p, x)

        p = array_ops.parallel_stack(list(x)).eval()
        self.assertAllEqual(p, x)
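Stacking n tensors of shape s always yields shape (n,) + s, so a zero anywhere in s must simply propagate rather than crash. The NumPy analogue of the shapes this test covers:

import numpy as np

x = np.zeros((2, 3, 0), dtype=np.int32)      # two inputs of shape (3, 0)
assert np.stack(list(x)).shape == (2, 3, 0)  # zero size propagates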
Example #11
  def testSimpleParallelGPU(self):
    np.random.seed(7)
    with self.session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
        with self.subTest(shape=shape):
          data = self.randn(shape, np.float32)
          xs = list(map(constant_op.constant, data))
          c = array_ops.parallel_stack(xs)
          self.assertAllEqual(c, data)
Example #12
  def testSimpleParallelGPU(self):
    # tf.parallel_stack is only supported in graph mode.
    with ops.Graph().as_default():
      with test_util.device(use_gpu=True):
        for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
          with self.subTest(shape=shape):
            data = self.randn(shape, np.float32)
            xs = list(map(constant_op.constant, data))
            c = array_ops.parallel_stack(xs)
            self.assertAllEqual(c, data)
Example #13
  def testAxis0DefaultGPU(self):
    # tf.parallel_stack is only supported in graph mode.
    with ops.Graph().as_default():
      with test_util.device(use_gpu=True):
        t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
        stacked = self.evaluate(array_ops.stack(t))
        parallel_stacked = self.evaluate(array_ops.parallel_stack(t))

      expected = np.array([[1, 2, 3], [4, 5, 6]])
      self.assertAllEqual(stacked, expected)
      self.assertAllEqual(parallel_stacked, expected)
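Note that Examples #13 and #14 fetch values with self.evaluate(...) instead of Tensor.eval(): evaluate does not require a default session to be active, which matters here because the graph is entered via ops.Graph().as_default() without a surrounding test_session.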
Example #14
    def testZeroSizeGPU(self):
        # tf.parallel_stack is only supported in graph mode.
        with ops.Graph().as_default():
            # Verify that stack doesn't crash for zero size inputs
            with test_util.device(use_gpu=True):
                for shape in (0, ), (3, 0), (0, 3):
                    with self.subTest(shape=shape):
                        x = np.zeros((2, ) + shape).astype(np.int32)
                        p = self.evaluate(array_ops.stack(list(x)))
                        self.assertAllEqual(p, x)

                        p = self.evaluate(array_ops.parallel_stack(list(x)))
                        self.assertAllEqual(p, x)
Example #15
  def testSimple(self):
    np.random.seed(7)
    with self.test_session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
        data = np.random.randn(*shape)
        # Convert [data[0], data[1], ...] separately to tensorflow
        # TODO(irving): Remove list() once we handle maps correctly
        xs = list(map(constant_op.constant, data))
        # Pack back into a single tensorflow tensor
        c = array_ops.stack(xs)
        self.assertAllEqual(c.eval(), data)

        c = array_ops.parallel_stack(xs)
        self.assertAllEqual(c.eval(), data)
Example #16
    def testAxis0Default(self):
        with self.test_session(use_gpu=True):
            t = [
                constant_op.constant([1, 2, 3]),
                constant_op.constant([4, 5, 6])
            ]

            packed = array_ops.pack(t).eval()
            stacked = array_ops.stack(t).eval()
            parallel_stacked = array_ops.parallel_stack(t).eval()

        self.assertAllEqual(packed, np.array([[1, 2, 3], [4, 5, 6]]))
        self.assertAllEqual(stacked, np.array([[1, 2, 3], [4, 5, 6]]))
        self.assertAllEqual(parallel_stacked, np.array([[1, 2, 3], [4, 5, 6]]))
Example #17
    def testSimple(self):
        np.random.seed(7)
        with self.test_session(use_gpu=True):
            for shape in (2, ), (3, ), (2, 3), (3, 2), (4, 3, 2):
                data = np.random.randn(*shape)
                # Convert [data[0], data[1], ...] separately to tensorflow
                # TODO(irving): Remove list() once we handle maps correctly
                xs = list(map(constant_op.constant, data))
                # Pack back into a single tensorflow tensor
                c = array_ops.stack(xs)
                self.assertAllEqual(c.eval(), data)

                c = array_ops.parallel_stack(xs)
                self.assertAllEqual(c.eval(), data)