def test_normal_arithmetic():
    """Check arithmetic operations on `Normal` distributions (nose-style)."""
    # Construct two normals with PSD covariance matrices.
    chol = np.random.randn(3, 3)
    dist = Normal(chol.dot(chol.T), np.random.randn(3, 1))
    chol = np.random.randn(3, 3)
    dist2 = Normal(chol.dot(chol.T), np.random.randn(3, 1))

    A = np.random.randn(3, 3)
    a = np.random.randn(1, 3)
    b = 5.

    # Test matrix multiplication.
    yield ok, allclose((dist.rmatmul(a)).mean, dist.mean.dot(a)), 'mean mul'
    yield ok, allclose((dist.rmatmul(a)).var,
                       a.dot(dense(dist.var)).dot(a.T)), 'var mul'
    yield ok, allclose((dist.lmatmul(A)).mean, A.dot(dist.mean)), 'mean rmul'
    yield ok, allclose((dist.lmatmul(A)).var,
                       A.dot(dense(dist.var)).dot(A.T)), 'var rmul'

    # Test multiplication.
    yield ok, allclose((dist * b).mean, dist.mean * b), 'mean mul 2'
    yield ok, allclose((dist * b).var, dist.var * b**2), 'var mul 2'
    yield ok, allclose((b * dist).mean, dist.mean * b), 'mean rmul 2'
    yield ok, allclose((b * dist).var, dist.var * b**2), 'var rmul 2'
    # Multiplying two distributions together is undefined.
    yield raises, NotImplementedError, lambda: dist.__mul__(dist)
    yield raises, NotImplementedError, lambda: dist.__rmul__(dist)

    # Test addition.
    yield ok, allclose((dist + dist2).mean, dist.mean + dist2.mean), 'mean sum'
    yield ok, allclose((dist + dist2).var, dist.var + dist2.var), 'var sum'
    yield ok, allclose((dist.__add__(b)).mean, dist.mean + b), 'mean add'
    yield ok, allclose((dist.__radd__(b)).mean, dist.mean + b), 'mean radd'
def test_equality():
    """Check elementwise equality for each structured-matrix type."""
    # Test `Dense`.
    a = Dense(np.random.randn(4, 2))
    yield assert_allclose, a == a, dense(a) == dense(a)

    # Test `Diagonal`.
    d = Diagonal(np.random.randn(4))
    yield assert_allclose, d == d, B.diag(d) == B.diag(d)

    # Test `LowRank`.
    lr = LowRank(left=np.random.randn(4, 2), middle=np.random.randn(2, 2))
    yield assert_allclose, lr == lr, (lr.l == lr.l, lr.m == lr.m, lr.r == lr.r)

    # Test `Woodbury`.
    yield assert_allclose, (lr + d) == (lr + d), (lr == lr, d == d)

    # Test `Constant`: equality should account for value and shape.
    c1 = Constant.from_(1, a)
    c1_2 = Constant(1, 4, 3)
    c2 = Constant.from_(2, a)
    yield eq, c1, c1
    yield neq, c1, c1_2
    yield neq, c1, c2

    # Test `One`.
    one1 = One(np.float64, 4, 2)
    one2 = One(np.float64, 4, 3)
    yield eq, one1, one1
    yield neq, one1, one2

    # Test `Zero`.
    zero1 = Zero(np.float64, 4, 2)
    zero2 = Zero(np.float64, 4, 3)
    yield eq, zero1, zero1
    yield neq, zero1, zero2
def test_qf():
    """Check the quadratic form `b^T A^{-1} c` and its diagonal variant."""
    # Generate some test inputs.
    b, c = np.random.randn(5, 3), np.random.randn(5, 3)

    # Generate some matrices to test: dense PSD, diagonal, and Woodbury.
    a = np.random.randn(5, 5)
    a = Dense(a.dot(a.T))
    d = Diagonal(B.diag(a))
    e = np.random.randn(2, 2)
    wb = d + LowRank(left=np.random.randn(5, 2), middle=e.dot(e.T))

    for x in [a, d, wb]:
        yield assert_allclose, B.qf(x, b), \
            np.linalg.solve(dense(x), b).T.dot(b)
        yield assert_allclose, B.qf(x, b, c), \
            np.linalg.solve(dense(x), b).T.dot(c)
        yield assert_allclose, B.qf_diag(x, b), \
            np.diag(np.linalg.solve(dense(x), b).T.dot(b))
        yield assert_allclose, B.qf_diag(x, b, c), \
            np.diag(np.linalg.solve(dense(x), b).T.dot(c))

    # Test `LowRank`: singular, so the quadratic form must refuse to solve.
    lr = LowRank(np.random.randn(5, 3))
    yield raises, RuntimeError, lambda: B.qf(lr, b)
    yield raises, RuntimeError, lambda: B.qf(lr, b, c)
    yield raises, RuntimeError, lambda: B.qf_diag(lr, b)
    yield raises, RuntimeError, lambda: B.qf_diag(lr, b, c)
def test_inverse_and_logdet():
    """Check `B.inverse` and `B.logdet` for each matrix type."""
    # Test `Dense`.
    a = np.random.randn(3, 3)
    a = Dense(a.dot(a.T))
    yield assert_allclose, B.matmul(a, B.inverse(a)), np.eye(3)
    yield assert_allclose, B.matmul(B.inverse(a), a), np.eye(3)
    yield assert_allclose, B.logdet(a), np.log(np.linalg.det(dense(a)))

    # Test `Diagonal`.
    d = Diagonal([1, 2, 3])
    yield assert_allclose, B.matmul(d, B.inverse(d)), np.eye(3)
    yield assert_allclose, B.matmul(B.inverse(d), d), np.eye(3)
    yield assert_allclose, B.logdet(d), np.log(np.linalg.det(dense(d)))
    # Inverting a rectangular diagonal should transpose its shape.
    yield eq, B.shape(B.inverse(Diagonal([1, 2], rows=2, cols=4))), (4, 2)

    # Test `Woodbury`: repeatedly invert to exercise both directions.
    a = np.random.randn(3, 2)
    b = np.random.randn(2, 2) + 1e-2 * np.eye(2)
    wb = d + LowRank(left=a, middle=b.dot(b.T))
    for _ in range(4):
        yield assert_allclose, B.matmul(wb, B.inverse(wb)), np.eye(3)
        yield assert_allclose, B.matmul(B.inverse(wb), wb), np.eye(3)
        yield assert_allclose, B.logdet(wb), np.log(np.linalg.det(dense(wb)))
        wb = B.inverse(wb)

    # Test `LowRank`: singular, so these operations must raise.
    yield raises, RuntimeError, lambda: B.inverse(wb.lr)
    yield raises, RuntimeError, lambda: B.logdet(wb.lr)
def test_normal_arithmetic():
    """Check arithmetic operations on `Normal` distributions (pytest-style)."""
    # Construct two normals with PSD covariance matrices.
    chol = np.random.randn(3, 3)
    dist = Normal(chol.dot(chol.T), np.random.randn(3, 1))
    chol = np.random.randn(3, 3)
    dist2 = Normal(chol.dot(chol.T), np.random.randn(3, 1))

    A = np.random.randn(3, 3)
    a = np.random.randn(1, 3)
    b = 5.

    # Test matrix multiplication.
    allclose((dist.rmatmul(a)).mean, dist.mean.dot(a))
    allclose((dist.rmatmul(a)).var, a.dot(dense(dist.var)).dot(a.T))
    allclose((dist.lmatmul(A)).mean, A.dot(dist.mean))
    allclose((dist.lmatmul(A)).var, A.dot(dense(dist.var)).dot(A.T))

    # Test multiplication.
    allclose((dist * b).mean, dist.mean * b)
    allclose((dist * b).var, dist.var * b**2)
    allclose((b * dist).mean, dist.mean * b)
    allclose((b * dist).var, dist.var * b**2)
    # Multiplying two distributions together is undefined.
    with pytest.raises(NotImplementedError):
        dist.__mul__(dist)
    with pytest.raises(NotImplementedError):
        dist.__rmul__(dist)

    # Test addition.
    allclose((dist + dist2).mean, dist.mean + dist2.mean)
    allclose((dist + dist2).var, dist.var + dist2.var)
    allclose((dist.__add__(b)).mean, dist.mean + b)
    allclose((dist.__radd__(b)).mean, dist.mean + b)
def test_sample():
    """Check that empirical covariances of samples match the true ones."""
    a = np.random.randn(3, 3)
    a = Dense(a.dot(a.T))
    b = np.random.randn(2, 2)
    wb = Diagonal(B.diag(a)) + \
         LowRank(left=np.random.randn(3, 2), middle=b.dot(b.T))

    # Test `Dense` and `Woodbury`.
    num_samps = 500000
    for cov in [a, wb]:
        samps = B.sample(cov, num_samps)
        cov_emp = B.matmul(samps, samps, tr_b=True) / num_samps
        # Monte Carlo estimate, so only require closeness up to a tolerance.
        yield le, np.mean(np.abs(dense(cov_emp) - dense(cov))), 5e-2
def test_schur():
    """Check the Schur complement `a - b^T c^{-1} d` for various types."""
    # Test `Dense`.
    a = np.random.randn(5, 10)
    b = np.random.randn(3, 5)
    c = np.random.randn(3, 3)
    d = np.random.randn(3, 10)
    c = c.dot(c.T)
    yield ok, allclose(B.schur(a, b, c, d),
                       a - np.linalg.solve(c.T, b).T.dot(d)), 'n n n n'

    # Test `Woodbury`.
    # The inverse of the Woodbury matrix already properly tests the method
    # for Woodbury matrices.
    c = np.random.randn(2, 2)
    c = Diagonal(np.array([1, 2, 3])) + \
        LowRank(left=np.random.randn(3, 2), middle=c.dot(c.T))
    yield ok, allclose(B.schur(a, b, c, d),
                       a - np.linalg.solve(dense(c).T, b).T.dot(d)), 'n n w n'

    # Test all combinations of `Woodbury`, `LowRank`, and `Diagonal`.
    a = np.random.randn(2, 2)
    a = Diagonal(np.array([4, 5, 6, 7, 8]), rows=5, cols=10) + \
        LowRank(left=np.random.randn(5, 2),
                right=np.random.randn(10, 2),
                middle=a.dot(a.T))
    b = np.random.randn(2, 2)
    b = Diagonal(np.array([9, 10, 11]), rows=3, cols=5) + \
        LowRank(left=c.lr.left, right=a.lr.left, middle=b.dot(b.T))
    d = np.random.randn(2, 2)
    d = Diagonal(np.array([12, 13, 14]), rows=3, cols=10) + \
        LowRank(left=c.lr.right, right=a.lr.right, middle=d.dot(d.T))

    # Loop over all combinations. Some of them should be efficient and
    # representation preserving; all of them should be correct.
    for ai in [a, a.lr, a.diag]:
        for bi in [b, b.lr, b.diag]:
            for ci in [c, c.diag]:
                for di in [d, d.lr, d.diag]:
                    yield ok, allclose(
                        B.schur(ai, bi, ci, di),
                        dense(ai) -
                        np.linalg.solve(dense(ci).T,
                                        dense(bi)).T.dot(dense(di))
                    ), '{} {} {} {}'.format(ai, bi, ci, di)
def test_arithmetic_and_shapes():
    """Check shapes and elementwise arithmetic between all matrix types."""
    a = Dense(np.random.randn(4, 3))
    d = Diagonal(np.array([1.0, 2.0, 3.0]), rows=4, cols=3)
    lr = LowRank(left=np.random.randn(4, 2),
                 right=np.random.randn(3, 2),
                 middle=np.random.randn(2, 2))
    zero = Zero.from_(a)
    one = One.from_(a)
    constant = Constant.from_(2.0, a)
    wb = d + lr

    # Aggregate all matrices, including plain scalars.
    candidates = [a, d, lr, wb, constant, one, zero, 2, 1, 0]

    # Check division.
    yield assert_allclose, a.__div__(5.0), dense(a) / 5.0
    yield assert_allclose, a.__truediv__(5.0), dense(a) / 5.0

    # Check shapes of every matrix candidate. Scalars are excluded because
    # they carry no matrix shape. (Bug fix: the original looped over
    # `candidates` but checked `B.shape(a)` every iteration, leaving the
    # loop variable unused.)
    for m in [a, d, lr, wb, constant, one, zero]:
        yield eq, B.shape(m), (4, 3)

    # Check interactions between all pairs of candidates.
    for m1, m2 in product(candidates, candidates):
        yield assert_allclose, m1 * m2, dense(m1) * dense(m2)
        yield assert_allclose, m1 + m2, dense(m1) + dense(m2)
        yield assert_allclose, m1 - m2, dense(m1) - dense(m2)
def test_sum():
    """Check `B.sum` against NumPy for all axis arguments."""
    a = Dense(np.random.randn(10, 20))
    yield assert_allclose, B.sum(a, axis=0), np.sum(dense(a), axis=0)

    for x in [Diagonal(np.array([1, 2, 3]), rows=3, cols=5),
              LowRank(left=np.random.randn(5, 3),
                      right=np.random.randn(10, 3),
                      middle=np.random.randn(3, 3))]:
        yield assert_allclose, B.sum(x), np.sum(dense(x))
        yield assert_allclose, B.sum(x, axis=0), np.sum(dense(x), axis=0)
        yield assert_allclose, B.sum(x, axis=1), np.sum(dense(x), axis=1)
        yield assert_allclose, B.sum(x, axis=(0, 1)), \
            np.sum(dense(x), axis=(0, 1))
def test_cholesky():
    """Check `B.cholesky` for dense, diagonal, and low-rank matrices."""
    a = np.random.randn(5, 5)
    a = a.T.dot(a)

    # Test `Dense`.
    yield assert_allclose, np.linalg.cholesky(a), B.cholesky(a)

    # Test `Diagonal`.
    d = Diagonal(np.diag(a))
    yield assert_allclose, np.linalg.cholesky(dense(d)), B.cholesky(d)

    # Test `LowRank`.
    a = np.random.randn(2, 2)
    lr = LowRank(left=np.random.randn(5, 2), middle=a.dot(a.T))
    chol = dense(B.cholesky(lr))
    # The Cholesky here is not technically the Cholesky decomposition.
    # Hence we test this slightly differently: only that it factorises `lr`.
    yield assert_allclose, chol.dot(chol.T), lr
def test_ratio():
    """Check `B.ratio`, which computes `trace(b^{-1} a)`."""
    a, b = np.random.randn(4, 4), np.random.randn(4, 4)
    a, b = Dense(a.dot(a.T)), Dense(b.dot(b.T))
    d, e = Diagonal(B.diag(a)), Diagonal(B.diag(b))
    c = np.random.randn(3, 3)
    lr = LowRank(left=np.random.randn(4, 3), middle=c.dot(c.T))

    yield assert_allclose, B.ratio(a, b), \
        np.trace(np.linalg.solve(dense(b), dense(a)))
    yield assert_allclose, B.ratio(lr, b), \
        np.trace(np.linalg.solve(dense(b), dense(lr)))
    yield assert_allclose, B.ratio(d, e), \
        np.trace(np.linalg.solve(dense(e), dense(d)))
def test_derivative():
    """Check derivative kernels: properties, equality, and computed values."""
    # First, check properties: a derivative kernel is not stationary and
    # has no well-defined length scale, variance, or period.
    k = EQ().diff(0)
    yield eq, k.stationary, False
    yield raises, RuntimeError, lambda: k.length_scale
    yield raises, RuntimeError, lambda: k.var
    yield raises, RuntimeError, lambda: k.period

    # Test equality.
    yield eq, EQ().diff(0), EQ().diff(0)
    yield neq, EQ().diff(0), EQ().diff(1)
    yield neq, Matern12().diff(0), EQ().diff(0)

    # Differentiating with respect to neither input is invalid.
    yield raises, RuntimeError, lambda: EQ().diff(None, None)(1)

    # Third, check computation. Derivatives require the TF backend.
    B.backend_to_tf()
    s = B.Session()

    # Test derivative of kernel EQ.
    k = EQ()
    x1 = B.array(np.random.randn(10, 1))
    x2 = B.array(np.random.randn(5, 1))

    # Test derivative with respect to first input.
    ref = s.run(-dense(k(x1, x2)) * (x1 - B.transpose(x2)))
    yield assert_allclose, s.run(dense(k.diff(0, None)(x1, x2))), ref
    ref = s.run(-dense(k(x1)) * (x1 - B.transpose(x1)))
    yield assert_allclose, s.run(dense(k.diff(0, None)(x1))), ref

    # Test derivative with respect to second input.
    ref = s.run(-dense(k(x1, x2)) * (B.transpose(x2) - x1))
    yield assert_allclose, s.run(dense(k.diff(None, 0)(x1, x2))), ref
    ref = s.run(-dense(k(x1)) * (B.transpose(x1) - x1))
    yield assert_allclose, s.run(dense(k.diff(None, 0)(x1))), ref

    # Test derivative with respect to both inputs.
    ref = s.run(dense(k(x1, x2)) * (1 - (x1 - B.transpose(x2))**2))
    yield assert_allclose, s.run(dense(k.diff(0, 0)(x1, x2))), ref
    yield assert_allclose, s.run(dense(k.diff(0)(x1, x2))), ref
    ref = s.run(dense(k(x1)) * (1 - (x1 - B.transpose(x1))**2))
    yield assert_allclose, s.run(dense(k.diff(0, 0)(x1))), ref
    yield assert_allclose, s.run(dense(k.diff(0)(x1))), ref

    # Test derivative of kernel Linear.
    k = Linear()
    x1 = B.array(np.random.randn(10, 1))
    x2 = B.array(np.random.randn(5, 1))

    # Test derivative with respect to first input.
    ref = s.run(B.ones((10, 5), dtype=np.float64) * B.transpose(x2))
    yield assert_allclose, s.run(dense(k.diff(0, None)(x1, x2))), ref
    ref = s.run(B.ones((10, 10), dtype=np.float64) * B.transpose(x1))
    yield assert_allclose, s.run(dense(k.diff(0, None)(x1))), ref

    # Test derivative with respect to second input.
    ref = s.run(B.ones((10, 5), dtype=np.float64) * x1)
    yield assert_allclose, s.run(dense(k.diff(None, 0)(x1, x2))), ref
    ref = s.run(B.ones((10, 10), dtype=np.float64) * x1)
    yield assert_allclose, s.run(dense(k.diff(None, 0)(x1))), ref

    # Test derivative with respect to both inputs.
    ref = s.run(B.ones((10, 5), dtype=np.float64))
    yield assert_allclose, s.run(dense(k.diff(0, 0)(x1, x2))), ref
    yield assert_allclose, s.run(dense(k.diff(0)(x1, x2))), ref
    ref = s.run(B.ones((10, 10), dtype=np.float64))
    yield assert_allclose, s.run(dense(k.diff(0, 0)(x1))), ref
    yield assert_allclose, s.run(dense(k.diff(0)(x1))), ref

    # Clean up: close the session and restore the NumPy backend.
    s.close()
    B.backend_to_np()
def test_mokernel():
    """Check `MultiOutputKernel` for all input-type combinations."""
    m = Graph()
    p1 = GP(1 * EQ(), graph=m)
    p2 = GP(2 * EQ().stretch(2), graph=m)

    mok = MultiOutputKernel(p1, p2)
    ks = m.kernels

    x1 = np.linspace(0, 1, 10)
    x2 = np.linspace(1, 2, 5)
    x3 = np.linspace(1, 2, 10)

    yield eq, str(mok), 'MultiOutputKernel(EQ(), 2 * (EQ() > 2))'

    # `B.Numeric` versus `B.Numeric`:
    yield assert_allclose, mok(x1, x2), \
        np.concatenate([np.concatenate([dense(ks[p1, p1](x1, x2)),
                                        dense(ks[p1, p2](x1, x2))], axis=1),
                        np.concatenate([dense(ks[p2, p1](x1, x2)),
                                        dense(ks[p2, p2](x1, x2))], axis=1)],
                       axis=0)
    yield assert_allclose, mok.elwise(x1, x3), \
        np.concatenate([ks[p1, p1].elwise(x1, x3),
                        ks[p2, p2].elwise(x1, x3)], axis=0)

    # `B.Numeric` versus `At`:
    yield assert_allclose, mok(p1(x1), x2), \
        np.concatenate([dense(ks[p1, p1](x1, x2)),
                        dense(ks[p1, p2](x1, x2))], axis=1)
    yield assert_allclose, mok(p2(x1), x2), \
        np.concatenate([dense(ks[p2, p1](x1, x2)),
                        dense(ks[p2, p2](x1, x2))], axis=1)
    yield assert_allclose, mok(x1, p1(x2)), \
        np.concatenate([dense(ks[p1, p1](x1, x2)),
                        dense(ks[p2, p1](x1, x2))], axis=0)
    yield assert_allclose, mok(x1, p2(x2)), \
        np.concatenate([dense(ks[p1, p2](x1, x2)),
                        dense(ks[p2, p2](x1, x2))], axis=0)
    yield raises, ValueError, lambda: mok.elwise(x1, p2(x3))
    yield raises, ValueError, lambda: mok.elwise(p1(x1), x3)

    # `At` versus `At`:
    yield assert_allclose, mok(p1(x1), p1(x2)), ks[p1](x1, x2)
    yield assert_allclose, mok(p1(x1), p2(x2)), ks[p1, p2](x1, x2)
    yield assert_allclose, mok.elwise(p1(x1), p1(x3)), ks[p1].elwise(x1, x3)
    yield assert_allclose, mok.elwise(p1(x1), p2(x3)), \
        ks[p1, p2].elwise(x1, x3)

    # `MultiInput` versus `MultiInput`:
    yield assert_allclose, \
        mok(MultiInput(p2(x1), p1(x2)), MultiInput(p2(x1))), \
        np.concatenate([dense(ks[p2, p2](x1, x1)),
                        dense(ks[p1, p2](x2, x1))], axis=0)
    yield raises, ValueError, \
        lambda: mok.elwise(MultiInput(p2(x1), p1(x3)), MultiInput(p2(x1)))
    yield assert_allclose, \
        mok.elwise(MultiInput(p2(x1), p1(x3)),
                   MultiInput(p2(x1), p1(x3))), \
        np.concatenate([ks[p2, p2].elwise(x1, x1),
                        ks[p1, p1].elwise(x3, x3)], axis=0)

    # `MultiInput` versus `At`:
    yield assert_allclose, mok(MultiInput(p2(x1), p1(x2)), p2(x1)), \
        np.concatenate([dense(ks[p2, p2](x1, x1)),
                        dense(ks[p1, p2](x2, x1))], axis=0)
    yield assert_allclose, mok(p2(x1), MultiInput(p2(x1), p1(x2))), \
        np.concatenate([dense(ks[p2, p2](x1, x1)),
                        dense(ks[p2, p1](x1, x2))], axis=1)
    yield raises, ValueError, \
        lambda: mok.elwise(MultiInput(p2(x1), p1(x3)), p2(x1))
    yield raises, ValueError, \
        lambda: mok.elwise(p2(x1), MultiInput(p2(x1), p1(x3)))
def test_dense():
    """Check `dense` conversion for every structured-matrix type."""
    a = np.random.randn(5, 3)
    yield assert_allclose, dense(Dense(a)), a
    yield assert_allclose, dense(a), a

    # Extensively test `Diagonal` with all shape arguments, including
    # truncation and zero-padding of the diagonal.
    yield assert_allclose, dense(Diagonal([1, 2])), \
        np.array([[1, 0], [0, 2]])
    yield assert_allclose, dense(Diagonal([1, 2], 3)), \
        np.array([[1, 0, 0], [0, 2, 0], [0, 0, 0]])
    yield assert_allclose, dense(Diagonal([1, 2], 1)), \
        np.array([[1]])
    yield assert_allclose, dense(Diagonal([1, 2], 3, 3)), \
        np.array([[1, 0, 0], [0, 2, 0], [0, 0, 0]])
    yield assert_allclose, dense(Diagonal([1, 2], 2, 3)), \
        np.array([[1, 0, 0], [0, 2, 0]])
    yield assert_allclose, dense(Diagonal([1, 2], 3, 2)), \
        np.array([[1, 0], [0, 2], [0, 0]])
    yield assert_allclose, dense(Diagonal([1, 2], 1, 3)), \
        np.array([[1, 0, 0]])
    yield assert_allclose, dense(Diagonal([1, 2], 3, 1)), \
        np.array([[1], [0], [0]])

    # Test low-rank matrices.
    left = np.random.randn(5, 3)
    right = np.random.randn(10, 3)
    middle = np.random.randn(3, 3)
    lr = LowRank(left=left, right=right, middle=middle)
    yield assert_allclose, dense(lr), left.dot(middle).dot(right.T)

    # Test Woodbury matrices.
    diag = Diagonal([1, 2, 3, 4], 5, 10)
    wb = Woodbury(diag=diag, lr=lr)
    yield assert_allclose, dense(wb), dense(diag) + dense(lr)
def test_eye_form():
    """Check that `B.eye_from` produces a rectangular identity."""
    a = Dense(np.random.randn(5, 10))
    yield assert_allclose, dense(B.eye_from(a)), np.eye(5, 10)
    yield assert_allclose, dense(B.eye_from(a.T)), np.eye(10, 5)
def test_dense_methods():
    """Check transposition, negation, and indexing of `Dense`."""
    a = Dense(np.random.randn(10, 5))
    yield assert_allclose, dense(a.T), dense(a).T
    yield assert_allclose, dense(-a), -dense(a)
    yield assert_allclose, dense(a[5]), dense(a)[5]
def compare(a):
    """Check that `B.transpose` agrees with NumPy transposition of `a`."""
    assert_allclose(B.transpose(a), dense(a).T)
def test_mokernel():
    """Check `MultiOutputKernel` for all input-type combinations
    (pytest-style)."""
    m = Graph()
    p1 = GP(1 * EQ(), graph=m)
    p2 = GP(2 * EQ().stretch(2), graph=m)

    mok = MultiOutputKernel(p1, p2)
    ks = m.kernels

    x1 = np.linspace(0, 1, 10)
    x2 = np.linspace(1, 2, 5)
    x3 = np.linspace(1, 2, 10)

    assert str(mok) == 'MultiOutputKernel(EQ(), 2 * (EQ() > 2))'

    # `B.Numeric` versus `B.Numeric`:
    allclose(mok(x1, x2),
             np.concatenate([np.concatenate([dense(ks[p1, p1](x1, x2)),
                                             dense(ks[p1, p2](x1, x2))],
                                            axis=1),
                             np.concatenate([dense(ks[p2, p1](x1, x2)),
                                             dense(ks[p2, p2](x1, x2))],
                                            axis=1)], axis=0))
    allclose(mok.elwise(x1, x3),
             np.concatenate([ks[p1, p1].elwise(x1, x3),
                             ks[p2, p2].elwise(x1, x3)], axis=0))

    # `B.Numeric` versus `At`:
    allclose(mok(p1(x1), x2),
             np.concatenate([dense(ks[p1, p1](x1, x2)),
                             dense(ks[p1, p2](x1, x2))], axis=1))
    allclose(mok(p2(x1), x2),
             np.concatenate([dense(ks[p2, p1](x1, x2)),
                             dense(ks[p2, p2](x1, x2))], axis=1))
    allclose(mok(x1, p1(x2)),
             np.concatenate([dense(ks[p1, p1](x1, x2)),
                             dense(ks[p2, p1](x1, x2))], axis=0))
    allclose(mok(x1, p2(x2)),
             np.concatenate([dense(ks[p1, p2](x1, x2)),
                             dense(ks[p2, p2](x1, x2))], axis=0))
    with pytest.raises(ValueError):
        mok.elwise(x1, p2(x3))
    with pytest.raises(ValueError):
        mok.elwise(p1(x1), x3)

    # `At` versus `At`:
    allclose(mok(p1(x1), p1(x2)), ks[p1](x1, x2))
    allclose(mok(p1(x1), p2(x2)), ks[p1, p2](x1, x2))
    allclose(mok.elwise(p1(x1), p1(x3)), ks[p1].elwise(x1, x3))
    allclose(mok.elwise(p1(x1), p2(x3)), ks[p1, p2].elwise(x1, x3))

    # `MultiInput` versus `MultiInput`:
    allclose(mok(MultiInput(p2(x1), p1(x2)), MultiInput(p2(x1))),
             np.concatenate([dense(ks[p2, p2](x1, x1)),
                             dense(ks[p1, p2](x2, x1))], axis=0))
    with pytest.raises(ValueError):
        mok.elwise(MultiInput(p2(x1), p1(x3)), MultiInput(p2(x1)))
    allclose(mok.elwise(MultiInput(p2(x1), p1(x3)),
                        MultiInput(p2(x1), p1(x3))),
             np.concatenate([ks[p2, p2].elwise(x1, x1),
                             ks[p1, p1].elwise(x3, x3)], axis=0))

    # `MultiInput` versus `At`:
    allclose(mok(MultiInput(p2(x1), p1(x2)), p2(x1)),
             np.concatenate([dense(ks[p2, p2](x1, x1)),
                             dense(ks[p1, p2](x2, x1))], axis=0))
    allclose(mok(p2(x1), MultiInput(p2(x1), p1(x2))),
             np.concatenate([dense(ks[p2, p2](x1, x1)),
                             dense(ks[p2, p1](x1, x2))], axis=1))
    with pytest.raises(ValueError):
        mok.elwise(MultiInput(p2(x1), p1(x3)), p2(x1))
    with pytest.raises(ValueError):
        mok.elwise(p2(x1), MultiInput(p2(x1), p1(x3)))
def test_matmul():
    """Check `B.matmul` for all combinations of matrix types and shapes."""
    diag_square = Diagonal([1, 2], 3)
    diag_tall = Diagonal([3, 4], 5, 3)
    diag_wide = Diagonal([5, 6], 2, 3)

    dense_square = Dense(np.random.randn(3, 3))
    dense_tall = Dense(np.random.randn(5, 3))
    dense_wide = Dense(np.random.randn(2, 3))

    lr = LowRank(left=np.random.randn(5, 2),
                 right=np.random.randn(3, 2),
                 middle=np.random.randn(2, 2))

    def compare(a, b):
        # Structured multiplication must agree with dense multiplication.
        return allclose(B.matmul(a, b), B.matmul(dense(a), dense(b)))

    # Test `Dense`.
    yield ok, compare(dense_wide, dense_tall.T), 'dense w x dense t'

    # Test `LowRank`.
    yield ok, compare(lr, dense_tall.T), 'lr x dense t'
    yield ok, compare(dense_wide, lr.T), 'dense w x lr'
    yield ok, compare(lr, diag_tall.T), 'lr x diag t'
    yield ok, compare(diag_wide, lr.T), 'diag w x lr'
    yield ok, compare(lr, lr.T), 'lr x lr'
    yield ok, compare(lr.T, lr), 'lr x lr (2)'

    # Test `Diagonal`.
    # Test multiplication between diagonal matrices.
    yield ok, compare(diag_square, diag_square.T), 'diag s x diag s'
    yield ok, compare(diag_tall, diag_square.T), 'diag t x diag s'
    yield ok, compare(diag_wide, diag_square.T), 'diag w x diag s'
    yield ok, compare(diag_square, diag_tall.T), 'diag s x diag t'
    yield ok, compare(diag_tall, diag_tall.T), 'diag t x diag t'
    yield ok, compare(diag_wide, diag_tall.T), 'diag w x diag t'
    yield ok, compare(diag_square, diag_wide.T), 'diag s x diag w'
    yield ok, compare(diag_tall, diag_wide.T), 'diag t x diag w'
    yield ok, compare(diag_wide, diag_wide.T), 'diag w x diag w'

    # Test multiplication between diagonal and dense matrices.
    yield ok, compare(diag_square, dense_square.T), 'diag s x dense s'
    yield ok, compare(diag_square, dense_tall.T), 'diag s x dense t'
    yield ok, compare(diag_square, dense_wide.T), 'diag s x dense w'
    yield ok, compare(diag_tall, dense_square.T), 'diag t x dense s'
    yield ok, compare(diag_tall, dense_tall.T), 'diag t x dense t'
    yield ok, compare(diag_tall, dense_wide.T), 'diag t x dense w'
    yield ok, compare(diag_wide, dense_square.T), 'diag w x dense s'
    yield ok, compare(diag_wide, dense_tall.T), 'diag w x dense t'
    yield ok, compare(diag_wide, dense_wide.T), 'diag w x dense w'
    yield ok, compare(dense_square, diag_square.T), 'dense s x diag s'
    yield ok, compare(dense_square, diag_tall.T), 'dense s x diag t'
    yield ok, compare(dense_square, diag_wide.T), 'dense s x diag w'
    yield ok, compare(dense_tall, diag_square.T), 'dense t x diag s'
    yield ok, compare(dense_tall, diag_tall.T), 'dense t x diag t'
    yield ok, compare(dense_tall, diag_wide.T), 'dense t x diag w'
    yield ok, compare(dense_wide, diag_square.T), 'dense w x diag s'
    yield ok, compare(dense_wide, diag_tall.T), 'dense w x diag t'
    yield ok, compare(dense_wide, diag_wide.T), 'dense w x diag w'

    # Test `B.matmul` with three matrices simultaneously.
    yield assert_allclose, \
        B.matmul(dense_tall, dense_square, dense_wide, tr_c=True), \
        dense(dense_tall).dot(dense(dense_square)).dot(dense(dense_wide).T)

    # Test `Woodbury`.
    wb = lr + dense_tall
    yield ok, compare(wb, dense_square.T), 'wb x dense s'
    yield ok, compare(dense_square, wb.T), 'dense s x wb'
    yield ok, compare(wb, wb.T), 'wb x wb'
    yield ok, compare(wb.T, wb), 'wb x wb (2)'
def compare(a, b):
    """Check that structured `B.matmul` agrees with dense multiplication."""
    return allclose(B.matmul(a, b), B.matmul(dense(a), dense(b)))