def run_test(*args):
  """Checks shape, dtype and distribution properties of `random.randn`.

  NOTE(review): defined as a closure — `self` (the test case), `random`
  and `np` come from an enclosing scope not visible here; confirm against
  the surrounding test method. The seeded draws below are order-sensitive:
  seed(10) must precede the first batch of samples for the later
  same-seed comparison to hold.

  Args:
    *args: ints forming the requested output shape, forwarded to
      `random.randn`.
  """
  num_samples = 1000
  tol = 0.1  # High tolerance to keep the # of samples low else the test
  # takes a long time to run.
  random.seed(10)
  outputs = [random.randn(*args) for _ in range(num_samples)]
  # Test output shape.
  for output in outputs:
    self.assertEqual(output.shape, tuple(args))
    self.assertEqual(output.dtype.type, random.DEFAULT_RANDN_DTYPE)
  if np.prod(args):  # Don't bother with empty arrays.
    outputs = [output.tolist() for output in outputs]
    # Test that the properties of normal distribution are satisfied.
    mean = np.mean(outputs, axis=0)
    stddev = np.std(outputs, axis=0)
    self.assertAllClose(mean, np.zeros(args), atol=tol)
    self.assertAllClose(stddev, np.ones(args), atol=tol)
    # Test that outputs are different with different seeds.
    random.seed(20)
    diff_seed_outputs = [
        random.randn(*args).tolist() for _ in range(num_samples)
    ]
    self.assertNotAllClose(outputs, diff_seed_outputs)
    # Test that outputs are the same with the same seed.
    random.seed(10)
    same_seed_outputs = [
        random.randn(*args).tolist() for _ in range(num_samples)
    ]
    self.assertAllClose(outputs, same_seed_outputs)
def testJit(self):
  """Checks that a jit-compiled function agrees with its eager original."""

  def reference(a, b):
    return math.sum(math.sqrt(math.exp(a)) + b)

  compiled = extensions.jit(reference)
  a, b = random.randn(10), random.randn(10)
  # Invoke twice: the first call traces/compiles, the second takes the
  # cached code path, and both must match the eager result.
  for _ in range(2):
    self.assertAllClose(reference(a, b), compiled(a, b))
def testGrad(self):
  """Checks `extensions.grad` against a tf.GradientTape reference."""

  def loss(a, b):
    return math.sum(math.sqrt(math.exp(a)) + b)

  grad_fn = extensions.grad(loss)

  def check(a, b):
    # Reference gradient w.r.t. the first argument via TF autodiff.
    with tf.GradientTape() as tape:
      tape.watch(a.data)
      value = loss(a, b)
    want = tape.gradient(value.data, a.data)
    self.assertAllEqual(want, grad_fn(a, b))

  # Arguments evaluate left-to-right, preserving the original RNG order.
  check(random.randn(10), random.randn(10))