def test_nandivide(self):
    foo = ch.array(np.random.randn(16).reshape((4, 4)))
    bar = ch.array(np.random.randn(16).reshape((4, 4)))
    bar[2, 2] = 0
    self.assertEqual(ch.NanDivide(foo, bar)[2, 2].r, 0.)
    foo[2, 2] = 0
    self.assertEqual(ch.NanDivide(foo, bar)[2, 2].r, 0.)
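
# As exercised above, ch.NanDivide behaves like elementwise division except
# that entries which would come out nan or inf (zero denominator, or 0/0)
# come out as 0 instead. A minimal plain-numpy sketch of that contract for
# reference -- the helper name is hypothetical, not the chumpy implementation:
def _nan_divide_reference(a, b):
    with np.errstate(divide='ignore', invalid='ignore'):
        out = a / b
    out[~np.isfinite(out)] = 0.
    return out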

def test_svd(self):
    mtx = ch.array(np.random.randn(100).reshape((10, 10)))

    # Get times for svd
    from linalg import svd
    u, s, v = svd(mtx)

    def setup():
        mtx.x = -mtx.x

    def go_r():
        _ = u.r
        _ = s.r
        _ = v.r

    def go_dr():
        _ = u.dr_wrt(mtx)
        _ = s.dr_wrt(mtx)
        _ = v.dr_wrt(mtx)

    cht_r = timer(setup, go_r, 20)
    cht_dr = timer(setup, go_dr, 1)

    # Get times for numpy svd
    def go():
        u, s, v = np.linalg.svd(mtx.x)

    npt = timer(setup=None, go=go, n=20)

    # Compare
    # print(cht_r / npt)
    # print(cht_dr / npt)
    self.assertLess(cht_r / npt, 3.3)
    self.assertLess(cht_dr / npt, 2700)
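
# test_svd depends on a module-level timer(setup, go, n) helper defined
# elsewhere in this file. A minimal sketch of the contract it appears to
# assume (run go() n times, re-running setup() before each call, and return
# the best per-call time) -- the real helper may well differ:
def _timer_sketch(setup, go, n):
    import time
    best = float('inf')
    for _ in range(n):
        if setup is not None:
            setup()
        t0 = time.time()
        go()
        best = min(best, time.time() - t0)
    return best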

def test_sum_mean_std_var(self):
    for fn in [ch.sum, ch.mean, ch.var, ch.std]:

        # Create fake input and differences in input space
        data1 = ch.ones((3, 4, 7, 2))
        data2 = ch.array(data1.r + .1 * np.random.rand(data1.size).reshape(data1.shape))
        diff = data2.r - data1.r

        # Compute outputs
        result1 = fn(data1, axis=2)
        result2 = fn(data2, axis=2)

        # Empirical and predicted derivatives
        gt = result2.r - result1.r
        pred = result1.dr_wrt(data1).dot(diff.ravel()).reshape(gt.shape)
        # print(np.max(np.abs(gt - pred)))

        if fn in [ch.std, ch.var]:
            # std/var are nonlinear, so the first-order prediction is looser
            self.assertTrue(1e-2 > np.max(np.abs(gt - pred)))
        else:
            self.assertTrue(1e-14 > np.max(np.abs(gt - pred)))

        # test caching
        dr0 = result1.dr_wrt(data1)
        data1[:] = np.random.randn(data1.size).reshape(data1.shape)
        self.assertTrue(result1.dr_wrt(data1) is dr0)  # changing values shouldn't force recompute
        result1.axis = 1
        self.assertTrue(result1.dr_wrt(data1) is not dr0)

    self.assertEqual(ch.mean(ch.eye(3), axis=1).ndim, np.mean(np.eye(3), axis=1).ndim)
    self.assertEqual(ch.mean(ch.eye(3), axis=0).ndim, np.mean(np.eye(3), axis=0).ndim)
    self.assertEqual(ch.sum(ch.eye(3), axis=1).ndim, np.sum(np.eye(3), axis=1).ndim)
    self.assertEqual(ch.sum(ch.eye(3), axis=0).ndim, np.sum(np.eye(3), axis=0).ndim)
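
# The pattern above -- perturb the input, then compare the empirical change
# in output against dr_wrt(input).dot(perturbation) -- is the standard
# first-order derivative check used throughout this suite. A standalone
# sketch of the same idea (the helper and its defaults are illustrative,
# not part of the chumpy API):
def _first_order_check(fn, x0, eps=1e-5, tol=1e-10):
    a = ch.array(x0)
    b = ch.array(x0 + eps * np.random.rand(*x0.shape))
    out_a, out_b = fn(a), fn(b)
    gt = out_b.r - out_a.r                 # empirical difference
    pred = out_a.dr_wrt(a).dot((b.r - a.r).ravel()).reshape(gt.shape)
    return np.max(np.abs(gt - pred)) < tol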

def test_cachehits(self):
    """Test how many nodes are visited when the cache is cleared. If the
    number of hits changes, it has to be looked at carefully to make sure
    that correctness and performance don't get messed up by a change."""
    a = ch.array(1)
    b = ch.array(2)
    c = a
    for i in range(10):
        c = a + c + b

    c.dr_wrt(a)
    c.dr_wrt(b)
    self.assertEqual(a.clear_cache() + b.clear_cache(), 59)

    c.dr_wrt(a)
    c.dr_wrt(b)
    self.assertEqual(a.clear_cache(123) + b.clear_cache(123), 41)
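
# clear_cache() drops cached r/dr values reachable from a node and returns
# how many nodes it visited; the exact counts asserted above (59, then 41
# when an iteration id is passed) act as a regression guard on traversal
# cost. A smaller run of the same mechanism (the visit count here depends
# on chumpy internals, so it is not asserted):
def _cachehits_example():
    a, b = ch.array(1), ch.array(2)
    c = a + b
    _ = c.r                                   # populate caches
    return a.clear_cache() + b.clear_cache()  # nodes visited while clearing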

def test_cumsum(self):
    a = ch.array([1., 5., 3., 7.])
    cs = ch.cumsum(a)
    r1 = cs.r
    dr = cs.dr_wrt(a)
    diff = (ch.random.rand(4) - .5) * .1
    a.x += diff.r
    pred = dr.dot(diff.r)
    gt = cs.r - r1
    self.assertTrue(1e-13 > np.max(np.abs(gt - pred)))
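
# For reference, the Jacobian of cumsum over a length-n vector is the n x n
# lower-triangular matrix of ones, so the derivative checked above has a
# simple closed form. A quick sanity sketch (hedged on dr_wrt returning
# either a sparse matrix or a dense array):
def _cumsum_jacobian_example():
    a = ch.array([1., 5., 3., 7.])
    J = ch.cumsum(a).dr_wrt(a)
    J = np.asarray(J.todense()) if hasattr(J, 'todense') else np.asarray(J)
    return np.allclose(J, np.tril(np.ones((4, 4))))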

def test_iteration_cache(self):
    """Each time you set an attribute, the cache (of r's and dr's) of
    ancestors is cleared. Because children share ancestors, these can be
    cleared multiple times unnecessarily; in some cases, where lots of
    objects exist, this cache clearing can actually be a bottleneck.

    Therefore the concept of an iteration was added. Intended to be used
    in an optimization setting (see optimization.py) and in the set()
    method, it avoids such redundant clearing of cache."""
    a, b, c = ch.Ch(1), ch.Ch(2), ch.Ch(3)
    x = a + b
    y = x + c
    self.assertTrue(y.r[0] == 6)

    a.__setattr__('x', 10, 1)
    self.assertTrue(y.r == 15)
    a.__setattr__('x', 100, 1)
    self.assertTrue(y.r == 15)
    a.__setattr__('x', 100, 2)
    self.assertTrue(y.r == 105)

    a, b, c = ch.array([1]), ch.array([2]), ch.array([3])
    x = a + b
    y = x + c
    self.assertTrue(y.r[0] == 6)

    a.__setattr__('x', np.array([10]), 1)
    self.assertTrue(y.r[0] == 15)
    a.__setattr__('x', np.array(100), 1)
    self.assertTrue(y.r[0] == 15)
    a.__setattr__('x', np.array(100), 2)
    self.assertTrue(y.r[0] == 105)

    a.__setitem__(range(0, 1), np.array(200), 2)
    self.assertTrue(y.r[0] == 105)
    a.__setitem__(range(0, 1), np.array(200), 3)
    self.assertTrue(y.r[0] == 205)
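
# The third argument to __setattr__/__setitem__ above is an iteration id:
# a set call only clears ancestor caches when the id advances past the one
# seen before, which is why the second set with the same id leaves y.r
# unchanged. In an optimization loop the id would be bumped once per step,
# roughly like this (illustrative sketch, not optimization.py's actual code):
def _optimization_step_sketch(free_variables, new_values, iteration):
    for p, v in zip(free_variables, new_values):
        p.__setattr__('x', v, iteration)  # shared ancestors cleared once per id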

def test_stacking(self):
    a1 = ch.Ch(np.arange(10).reshape(2, 5))
    b1 = ch.Ch(np.arange(20).reshape(4, 5))
    c1 = ch.vstack((a1, b1))
    c1_check = np.vstack((a1.r, b1.r))
    residuals1 = (c1_check - c1.r).ravel()

    a2 = ch.Ch(np.arange(10).reshape(5, 2))
    b2 = ch.Ch(np.arange(20).reshape(5, 4))
    c2 = ch.hstack((a2, b2))
    c2_check = np.hstack((a2.r, b2.r))
    residuals2 = (c2_check - c2.r).ravel()

    self.assertFalse(np.any(residuals1))
    self.assertFalse(np.any(residuals2))

    d0 = ch.array(np.arange(60).reshape((10, 6)))
    d1 = ch.vstack((d0[:4], d0[4:]))
    d2 = ch.hstack((d1[:, :3], d1[:, 3:]))
    tmp = d2.dr_wrt(d0).todense()
    diff = tmp - np.eye(tmp.shape[0])
    self.assertFalse(np.any(diff.ravel()))
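
# Slicing d0 apart and re-stacking the pieces reproduces d0 element for
# element, so the Jacobian of d2 with respect to d0 must be the identity --
# which is exactly what the dense comparison above verifies. The same holds
# for any slice-then-restack round trip, e.g. (illustrative):
def _restack_identity_example():
    d0 = ch.array(np.arange(60).reshape((10, 6)))
    d3 = ch.vstack((d0[:7], d0[7:]))
    return not np.any(d3.dr_wrt(d0).todense() - np.eye(60))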

def test_make_sure_is_double(self):
    x = ch.array([0])
    self.assertTrue(isinstance(x.r[0], np.float64))

def test_casting(self):
    for fn in float, int:
        self.assertEqual(fn(np.array(5)), fn(ch.array(5)))
        self.assertEqual(fn(np.array([[5]])), fn(ch.array([[5]])))

def setUp(self):
    np.random.seed(0)
    self.mtx_10 = ch.array(np.random.randn(100).reshape((10, 10)))
    self.mtx_1k = ch.array(np.random.randn(1000000).reshape((1000, 1000)))