def test_same_parameters_after_1_iters(self):
    """After one EM iteration from identical starting parameters, the
    simple and vectorized implementations should agree within tolerance."""
    n_iters = 1
    simple = PCCASimple(latent_dim=self.k, dims=[self.p1, self.p2],
                        n_iters=n_iters)
    vectorized = PCCAVec(latent_dim=self.k, dims=[self.p1, self.p2],
                         n_iters=n_iters)

    # Start both models from exactly the same parameter values.
    for model in (simple, vectorized):
        model.Lambda1.data = self.Lambda1
        model.Lambda2.data = self.Lambda2
        model.Psi1_diag.data = self.Psi1_diag
        model.Psi2_diag.data = self.Psi2_diag

    # Run a single EM iteration in each implementation.
    simple.forward(self.y)
    vectorized.forward(self.y)

    atol = 0.01
    self.assertTrue(torch.allclose(simple.Lambda1, vectorized.Lambda1,
                                   atol=atol))
    self.assertTrue(torch.allclose(simple.Lambda2, vectorized.Lambda2,
                                   atol=atol))
    self.assertTrue(torch.allclose(simple.Psi1_diag, vectorized.Psi1_diag,
                                   atol=atol))
    self.assertTrue(torch.allclose(simple.Psi2_diag, vectorized.Psi2_diag,
                                   atol=atol))
class UnitTest(unittest.TestCase):
    """Verify that looped (simple) and vectorized Psi-update terms match."""

    def setUp(self):
        """Create shared random data and one instance of each PCCA variant."""
        self.n = 1000
        p1, p2 = 5, 3
        self.k = 2
        self.p = p1 + p2
        # Random parameters so that both implementations can be compared on
        # identical inputs.
        self.P_diag = torch.diag(torch.randn(self.p, self.p)) * 10
        self.L = torch.randn(self.p, self.k) * 10
        self.y = torch.randn(self.p, self.n) * 10
        # We only initialize these in order to access their methods.
        self.pccas = PCCASimple(latent_dim=self.k, dims=[p1, p2], n_iters=1)
        self.pccav = PCCAVec(latent_dim=self.k, dims=[p1, p2], n_iters=1)

    # --------------------------------------------------------------------------

    def test_Psi_rterm(self):
        """Per-sample accumulation of the Psi right-hand term should match
        the vectorized sum-of-outer-products form."""
        looped = torch.zeros(self.p, self.p)
        for yi in self.y.t():
            Ez = self.pccas.E_z_given_y(self.L, self.P_diag, yi)
            looped += outer(yi, yi) - outer(self.L @ Ez, yi)

        Ez_batch = self.pccav.E_z_given_y(self.L, self.P_diag, self.y)
        vectorized = (LA.sum_outers(self.y, self.y)
                      - LA.sum_outers(self.L @ Ez_batch, self.y))

        self.assertTrue(torch.allclose(looped, vectorized, atol=0.01))

    # --------------------------------------------------------------------------
    # In principle we should test this, but we're literally just diagonalizing
    # and multiplying by 1./n. It has nothing to do with vectorization.

    def test_Psi_diag_new(self):
        """The full Psi diagonal update should match between implementations."""
        looped = torch.zeros(self.p, self.p)
        for yi in self.y.t():
            Ez = self.pccas.E_z_given_y(self.L, self.P_diag, yi)
            looped += outer(yi, yi) - outer(self.L @ Ez, yi)
        Psi_new1 = 1./self.n * diag(looped)

        Ez_batch = self.pccav.E_z_given_y(self.L, self.P_diag, self.y)
        vectorized = (LA.sum_outers(self.y, self.y)
                      - LA.sum_outers(self.L @ Ez_batch, self.y))
        Psi_new2 = 1./self.n * diag(vectorized)

        self.assertTrue(torch.allclose(Psi_new1, Psi_new2, atol=0.01))
class UnitTest(unittest.TestCase):
    """Verify that one EM step agrees across all three PCCA implementations."""

    def setUp(self):
        """Create shared random data and one instance of each PCCA variant."""
        self.n = 1000
        p1, p2 = 5, 3
        self.k = 2
        self.p = p1 + p2
        # Random parameters so that all implementations can be compared on
        # identical inputs.
        self.P_diag = torch.diag(torch.randn(self.p, self.p)) * 10
        self.L = torch.randn(self.p, self.k) * 10
        self.y = torch.randn(self.p, self.n) * 10
        # We only initialize these in order to access their methods.
        self.pccas = PCCASimple(latent_dim=self.k, dims=[p1, p2], n_iters=1)
        self.pccav = PCCAVec(latent_dim=self.k, dims=[p1, p2], n_iters=1)
        self.pccao = PCCAOpt(latent_dim=self.k, dims=[p1, p2], n_iters=1,
                             private_z=False)

    def test_em_step(self):
        """A single EM step should yield (nearly) identical parameter
        updates in the simple, vectorized, and optimized implementations."""
        atol = 0.1
        L_new1, P_diag_new1 = self.pccas.em_step(self.y, self.L, self.P_diag)
        L_new2, P_diag_new2 = self.pccav.em_step(self.y, self.L, self.P_diag)
        # Bug fix: the original called self.pccav.em_step twice, so the
        # optimized implementation (self.pccao, built in setUp but never
        # used) was never actually exercised.
        L_new3, P_diag_new3 = self.pccao.em_step(self.y, self.L, self.P_diag)

        self.assertTrue(torch.allclose(L_new1, L_new2, atol=atol))
        self.assertTrue(torch.allclose(L_new2, L_new3, atol=atol))
        self.assertTrue(torch.allclose(P_diag_new1, P_diag_new2, atol=atol))
        self.assertTrue(torch.allclose(P_diag_new2, P_diag_new3, atol=atol))
def test_same_nlls(self):
    """Both implementations should compute (nearly) the same negative log
    likelihood across several random parameter draws.

    Draws that produce NaN in either implementation are redrawn rather than
    stored.
    """
    pccas = PCCASimple(self.k, [self.p1, self.p2], 1)
    pccav = PCCAVec(self.k, [self.p1, self.p2], 1)

    n_tries = 3
    nlls1 = torch.empty(n_tries)
    nlls2 = torch.empty(n_tries)

    i = 0
    while i < n_tries:
        # Generate new random parameters.
        Lambda1 = torch.randn(self.p1, self.k)
        Lambda2 = torch.randn(self.p2, self.k)
        Psi1_diag = torch.randn(self.p1)
        Psi2_diag = torch.randn(self.p2)

        # Set each model with these new random parameters.
        for model in (pccas, pccav):
            model.Lambda1.data = Lambda1
            model.Lambda2.data = Lambda2
            model.Psi1_diag.data = Psi1_diag
            model.Psi2_diag.data = Psi2_diag

        Lambda = torch.cat([Lambda1, Lambda2], dim=0)
        Psi_diag = torch.cat([Psi1_diag, Psi2_diag])

        # Compute negative log likelihood for these data and parameters.
        nll1 = pccas.neg_log_likelihood(self.y, Lambda, Psi_diag)
        nll2 = pccav.neg_log_likelihood(self.y, Lambda, Psi_diag)

        # Bug fix: the original decremented `i` on NaN, which overwrote a
        # previously stored *valid* result. On NaN, simply retry the same
        # slot with fresh random parameters.
        if np.isnan(nll1) or np.isnan(nll2):
            continue

        nlls1[i] = nll1
        nlls2[i] = nll2
        i += 1

    # These are really big numbers. Close by 10 is actually good, I think.
    self.assertTrue(torch.allclose(nlls1, nlls2, atol=10))
def setUp(self):
    """Build random data/parameters and one instance of each PCCA variant."""
    self.n = 1000
    dim1, dim2 = 5, 3
    self.k = 2
    self.p = dim1 + dim2
    # Random parameters so that both implementations can be checked against
    # each other on identical inputs.
    self.P_diag = torch.diag(torch.randn(self.p, self.p)) * 10
    self.L = torch.randn(self.p, self.k) * 10
    self.y = torch.randn(self.p, self.n) * 10
    # Instantiated only so that their methods can be called directly.
    self.pccas = PCCASimple(latent_dim=self.k, dims=[dim1, dim2], n_iters=1)
    self.pccav = PCCAVec(latent_dim=self.k, dims=[dim1, dim2], n_iters=1)
class UnitTest(unittest.TestCase):
    """Verify looped vs. vectorized E-step expectations and Lambda updates."""

    def setUp(self):
        """Create shared random data and one instance of each PCCA variant."""
        self.n = 1000
        p1, p2 = 5, 3
        self.k = 2
        self.p = p1 + p2
        # Random parameters so that both implementations can be compared on
        # identical inputs.
        self.P_diag = torch.diag(torch.randn(self.p, self.p)) * 10
        self.L = torch.randn(self.p, self.k) * 10
        self.y = torch.randn(self.p, self.n) * 10
        # We only initialize these in order to access their methods.
        self.pccas = PCCASimple(latent_dim=self.k, dims=[p1, p2], n_iters=1)
        self.pccav = PCCAVec(latent_dim=self.k, dims=[p1, p2], n_iters=1)

    # --------------------------------------------------------------------------

    def test_E_z_given_y(self):
        """Per-sample and batched E[z|y] should agree column by column."""
        looped = torch.empty(self.k, self.n)
        for i in range(self.n):
            looped[:, i] = self.pccas.E_z_given_y(self.L, self.P_diag,
                                                  self.y[:, i])
        batched = self.pccav.E_z_given_y(self.L, self.P_diag, self.y)
        self.assertTrue(torch.allclose(looped, batched, atol=0.0001))

    # --------------------------------------------------------------------------

    def test_E_zzT_given_y(self):
        """Summing per-sample E[zz^T|y] should match the batched version."""
        looped = torch.zeros(self.k, self.k)
        for i in range(self.n):
            looped += self.pccas.E_zzT_given_y(self.L, self.P_diag,
                                               self.y[:, i], self.k)
        batched = self.pccav.E_zzT_given_y(self.L, self.P_diag, self.y, self.k)
        self.assertTrue(torch.allclose(looped, batched, atol=0.0001))

    # --------------------------------------------------------------------------

    def test_Lambda_lterm(self):
        """Loop-accumulated and vectorized Lambda left terms should agree."""
        looped = torch.zeros(self.p, self.k)
        for yi in self.y.t():
            Ez = self.pccas.E_z_given_y(self.L, self.P_diag, yi)
            looped += outer(yi, Ez)

        Ez_batch = self.pccav.E_z_given_y(self.L, self.P_diag, self.y)
        batched = LA.sum_outers(self.y, Ez_batch)

        self.assertTrue(torch.allclose(looped, batched, atol=0.0001))

    # --------------------------------------------------------------------------

    def test_Lambda_new(self):
        """The complete Lambda update should match between implementations."""
        lterm = torch.zeros(self.p, self.k)
        rterm = torch.zeros(self.k, self.k)
        for yi in self.y.t():
            Ez = self.pccas.E_z_given_y(self.L, self.P_diag, yi)
            lterm += outer(yi, Ez)
            rterm += self.pccas.E_zzT_given_y(self.L, self.P_diag, yi, self.k)
        Lambda_new1 = lterm @ inv(rterm)

        Ez_batch = self.pccav.E_z_given_y(self.L, self.P_diag, self.y)
        lterm2 = torch.einsum('ab,cb->ac', [self.y, Ez_batch])
        rterm2 = self.pccav.E_zzT_given_y(self.L, self.P_diag, self.y, self.k)
        Lambda_new2 = lterm2 @ inv(rterm2)

        # Increasing tolerance because of compounding round off errors.
        self.assertTrue(torch.allclose(Lambda_new1, Lambda_new2, atol=0.001))