def testInt(self):
   np.random.seed(54321)
   x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
   tf_x = ops.convert_n_to_tensor(x)
   with self.test_session(use_gpu=True):
     self.assertAllEqual(sum(x), av2.accumulate_n_v2(tf_x).eval())
     self.assertAllEqual(x[0] * 6, av2.accumulate_n_v2([tf_x[0]] * 6).eval())
 def testInt(self):
   np.random.seed(54321)
   x = [np.random.randint(-128, 128, (5, 4, 3, 2, 1)) for _ in range(6)]
   tf_x = ops.convert_n_to_tensor(x)
   with self.test_session(use_gpu=True):
     self.assertAllEqual(sum(x), av2.accumulate_n_v2(tf_x).eval())
     self.assertAllEqual(x[0] * 6, av2.accumulate_n_v2([tf_x[0]] * 6).eval())
 def testFloat(self):
   np.random.seed(12345)
   x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
   tf_x = ops.convert_n_to_tensor(x)
   with self.test_session(use_gpu=True):
     self.assertAllClose(sum(x), av2.accumulate_n_v2(tf_x).eval())
     self.assertAllClose(x[0] * 5, av2.accumulate_n_v2([tf_x[0]] * 5).eval())
 def testFloat(self):
   np.random.seed(12345)
   x = [np.random.random((1, 2, 3, 4, 5)) - 0.5 for _ in range(5)]
   tf_x = ops.convert_n_to_tensor(x)
   with self.test_session(use_gpu=True):
     self.assertAllClose(sum(x), av2.accumulate_n_v2(tf_x).eval())
     self.assertAllClose(x[0] * 5, av2.accumulate_n_v2([tf_x[0]] * 5).eval())
 def testWrongShape(self):
     with self.test_session():
         with self.assertRaises(ValueError):
             a = variables.Variable(0.2)
             b = variables.Variable(0.1)
             tf_val = av2.accumulate_n_v2([a, b],
                                          shape=[2,
                                                 2])  # Should be shape=[]
 def testGrad(self):
   np.random.seed(42)
   for num_inputs in range(1, 10):
     with self.test_session(use_gpu=True) as sess:
       input_vars = [
           variables.Variable(10.0 * np.random.random())
           for i in range(0, num_inputs)
       ]
       accum_n = av2.accumulate_n_v2(input_vars)
       sess.run(variables.global_variables_initializer())
       accum_n_grad = gradients.gradients(accum_n, input_vars)
       self.assertAllEqual(np.repeat(1.0, num_inputs), # d/dx (x + y + ...) = 1
                           [g.eval() for g in accum_n_grad])
 def testSimple(self):
   with self.test_session():
     random_arrays = [
         np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
     ]
     random_tensors = [
         ops.convert_to_tensor(x, dtype=dtypes_lib.float32)
         for x in random_arrays
     ]
     tf_val = av2.accumulate_n_v2(random_tensors)
     np_val = random_arrays[0]
     for random_array in random_arrays[1:]:
       np_val += random_array
     self.assertAllClose(np_val, tf_val.eval())
 def testSimple(self):
   with self.test_session():
     random_arrays = [
         np.random.rand(16, 16, 16, 16).astype(np.float32) for _ in range(20)
     ]
     random_tensors = [
         ops.convert_to_tensor(
             x, dtype=dtypes_lib.float32) for x in random_arrays
     ]
     tf_val = av2.accumulate_n_v2(random_tensors)
     np_val = random_arrays[0]
     for random_array in random_arrays[1:]:
       np_val += random_array
     self.assertAllClose(np_val, tf_val.eval())
 def testMinimalEagerMode(self):
     forty = constant_op.constant(40)
     two = constant_op.constant(2)
     answer = av2.accumulate_n_v2([forty, two])
     self.assertEqual(42, answer.numpy())
 def testZeroArgs(self):
     with self.test_session():
         with self.assertRaises(ValueError):
             tf_val = av2.accumulate_n_v2([])
             tf_val.eval()
 def testWrongTypeOneInput(self):
     # Scenario that used to trigger a bug, even when testWrongType() worked
     with self.test_session():
         with self.assertRaises(TypeError):
             a = variables.Variable(0.2, dtype=np.float32)
             tf_val = av2.accumulate_n_v2([a], tensor_dtype=np.int32)
 def testWrongType(self):
     with self.test_session():
         with self.assertRaises(TypeError):
             a = variables.Variable(0.2, dtype=np.float32)
             b = variables.Variable(0.1, dtype=np.float32)
             tf_val = av2.accumulate_n_v2([a, b], tensor_dtype=np.int32)
 def testIncompatibleShapes(self):
     with self.test_session():
         with self.assertRaises(ValueError):
             a = variables.Variable(np.array([0.1, 0.2]))
             b = variables.Variable(np.array([[0.3], [0.4]]))
             tf_val = av2.accumulate_n_v2([a, b])
 def testIncompatibleShapes(self):
   with self.test_session():
     with self.assertRaises(ValueError):
       a = variables.Variable(np.array([0.1, 0.2]))
       b = variables.Variable(np.array([[0.3], [0.4]]))
       tf_val = av2.accumulate_n_v2([a, b])
 def testWrongShape(self):
   with self.test_session():
     with self.assertRaises(ValueError):
       a = variables.Variable(0.2)
       b = variables.Variable(0.1)
       tf_val = av2.accumulate_n_v2([a, b], shape=[2, 2])  # Should be shape=[]
 def testZeroArgs(self):
   with self.test_session():
     with self.assertRaises(ValueError):
       tf_val = av2.accumulate_n_v2([])
       tf_val.eval()
 def fn(first, second, third):
     return av2.accumulate_n_v2([first, second, third])
 def testWrongTypeOneInput(self):
   # Scenario that used to trigger a bug, even when testWrongType() worked
   with self.test_session():
     with self.assertRaises(TypeError):
       a = variables.Variable(0.2, dtype=np.float32)
       tf_val = av2.accumulate_n_v2([a], tensor_dtype=np.int32)
 def testWrongType(self):
   with self.test_session():
     with self.assertRaises(TypeError):
       a = variables.Variable(0.2, dtype=np.float32)
       b = variables.Variable(0.1, dtype=np.float32)
       tf_val = av2.accumulate_n_v2([a, b], tensor_dtype=np.int32)
 def fn(first, second, third):
   return av2.accumulate_n_v2([first, second, third])
 def testMinimalEagerMode(self):
   forty = constant_op.constant(40)
   two = constant_op.constant(2)
   answer = av2.accumulate_n_v2([forty, two])
   self.assertEqual(42, answer.numpy())