def test_objective_sym_optimum():
    """The coefficients returned by fit_sym minimise the symmetric
    objective: no random coefficient vector scores below the optimum."""
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    K = gaussian_kernel(Z, sigma=sigma)
    a_opt = develop_gaussian.fit_sym(Z, sigma, lmbda, K)
    J_opt = develop_gaussian.objective_sym(Z, sigma, lmbda, a_opt, K)

    # ten random coefficient vectors must all be at least as costly
    for _ in range(10):
        a_rnd = np.random.randn(len(Z))
        J_rnd = develop_gaussian.objective_sym(Z, sigma, lmbda, a_rnd, K)
        assert J_rnd >= J_opt
def test_objective_sym_same_as_from_estimation():
    """objective_sym gives the same value whether b and C are supplied
    precomputed or derived internally from Z and K."""
    sigma, lmbda = 1., 1.
    Z = np.random.randn(100, 2)
    K = gaussian_kernel(Z, sigma=sigma)
    coeffs = develop_gaussian.fit_sym(Z, sigma, lmbda, K)

    C = develop_gaussian.compute_C_sym(Z, K, sigma)
    b = develop_gaussian.compute_b_sym(Z, K, sigma)
    J_precomputed = develop_gaussian.objective_sym(Z, sigma, lmbda, coeffs, K, b, C)
    J_internal = develop_gaussian.objective_sym(Z, sigma, lmbda, coeffs, K)
    assert_almost_equal(J_precomputed, J_internal)
def test_objective_sym_against_naive():
    """Check the unregularised objective_sym against a brute-force
    triple-loop evaluation of the score-matching objective.

    NOTE(review): a function with this exact name appears again later
    in the file; this copy is shadowed at import time and never runs —
    confirm and remove one of the duplicates.
    """
    sigma = 1.
    D, N = 2, 10
    Z = np.random.randn(N, D)
    K = gaussian_kernel(Z, sigma=sigma)

    num_trials = 10
    for _ in range(num_trials):
        alpha = np.random.randn(N)

        # linear term: alpha_i K_ij (-1 + (2/sigma)(z_id - z_jd)^2)
        term_a = 0
        for d in range(D):
            for i in range(N):
                for j in range(N):
                    diff = Z[i][d] - Z[j][d]
                    term_a += alpha[i] * K[i, j] * (-1 + 2. / sigma * (diff ** 2))
        term_a *= (2. / (N * sigma))

        # quadratic term: squared kernel-weighted coordinate differences
        term_b = 0
        for d in range(D):
            for i in range(N):
                acc = 0
                for j in range(N):
                    acc += alpha[j] * (Z[j, d] - Z[i, d]) * K[i, j]
                term_b += acc ** 2
        term_b *= (2. / (N * (sigma ** 2)))

        J_naive = term_a + term_b

        # compare to unregularised objective
        lmbda = 0.
        J = develop_gaussian.objective_sym(Z, sigma, lmbda, alpha, K)
        assert_close(J_naive, J)
def test_objective_sym_against_naive():
    """Brute-force reference check of the unregularised symmetric
    objective: evaluate both terms with explicit loops and compare.

    NOTE(review): this duplicates an earlier function of the same name
    (the earlier copy is shadowed) — confirm and keep only one.
    """
    sigma = 1.
    D = 2
    N = 10
    Z = np.random.randn(N, D)
    K = gaussian_kernel(Z, sigma=sigma)

    for _ in range(10):
        alpha = np.random.randn(N)

        # first objective term, summed over dimensions and point pairs
        J_a = 0
        for d in range(D):
            for i in range(N):
                for j in range(N):
                    J_a += alpha[i] * K[i, j] * \
                        (-1 + 2. / sigma * ((Z[i][d] - Z[j][d]) ** 2))
        J_a *= (2. / (N * sigma))

        # second objective term: square of an inner kernel-weighted sum
        J_b = 0
        for d in range(D):
            for i in range(N):
                inner = 0
                for j in range(N):
                    inner += alpha[j] * (Z[j, d] - Z[i, d]) * K[i, j]
                J_b += inner ** 2
        J_b *= (2. / (N * (sigma ** 2)))

        # compare against the implementation with regularisation off
        lmbda = 0.
        J = develop_gaussian.objective_sym(Z, sigma, lmbda, alpha, K)
        assert_close(J_a + J_b, J)
def test_objective_matches_sym():
    """The general objective evaluated with identical train/test sets
    (Z, Z) must exactly equal the specialised symmetric objective.

    Fix: removed leftover Python-2-style debug statements
    (``print type(J)``), which are syntax errors under Python 3 and
    were debug noise in any case.
    """
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    alpha = np.random.randn(len(Z))

    J_sym = develop_gaussian.objective_sym(Z, sigma, lmbda, alpha)
    J = gaussian.objective(Z, Z, sigma, lmbda, alpha)
    assert_equal(J, J_sym)
def test_objective_sym_matches_full():
    """The low-rank (incomplete-Cholesky) fit reaches an objective value
    close to the full-rank optimum (loose tolerance, delta=2).

    NOTE(review): a function with this exact name is defined again later
    in the file and shadows this one — confirm and remove a copy.
    """
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    K = gaussian_kernel(Z, sigma=sigma)

    # full-rank solution and its objective value
    a_full = develop_gaussian.fit_sym(Z, sigma, lmbda, K)
    J_full = develop_gaussian.objective_sym(Z, sigma, lmbda, a_full, K)

    # low-rank solution via incomplete Cholesky
    L = incomplete_cholesky_gaussian(Z, sigma, eta=0.01)["R"].T
    a_low = develop_gaussian_low_rank.fit_sym(Z, sigma, lmbda, L)
    J_low = develop_gaussian_low_rank.objective_sym(Z, sigma, lmbda, a_low, L)

    assert_almost_equal(J_full, J_low, delta=2.)
def test_objective_matches_sym_precomputed_KbC():
    """With K, b, and C all precomputed and passed in, the general
    objective on (Z, Z) must exactly equal the symmetric objective.

    Fix: removed a redundant second computation of the Gaussian kernel
    matrix K — it was recomputed with identical arguments right after
    b was built, discarding the first (identical) result.
    """
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    K = gaussian_kernel(Z, sigma=sigma)
    alpha = np.random.randn(len(Z))

    C = develop_gaussian.compute_C_sym(Z, K, sigma)
    b = develop_gaussian.compute_b_sym(Z, K, sigma)

    J_sym = develop_gaussian.objective_sym(Z, sigma, lmbda, alpha, K, b, C)
    J = gaussian.objective(Z, Z, sigma, lmbda, alpha, K_XY=K, b=b, C=C)
    assert_equal(J, J_sym)
def test_objective_sym_matches_full():
    """Compare the full-rank optimum of the symmetric objective to the
    optimum obtained from a low-rank incomplete-Cholesky factorisation;
    the two should agree within a loose delta of 2.

    NOTE(review): this duplicates an earlier function of the same name
    (this later definition is the one that actually runs) — confirm and
    keep only one copy.
    """
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    K = gaussian_kernel(Z, sigma=sigma)

    a_opt = develop_gaussian.fit_sym(Z, sigma, lmbda, K)
    J_opt = develop_gaussian.objective_sym(Z, sigma, lmbda, a_opt, K)

    chol = incomplete_cholesky_gaussian(Z, sigma, eta=0.01)
    L = chol["R"].T
    a_chol = develop_gaussian_low_rank.fit_sym(Z, sigma, lmbda, L)
    J_chol = develop_gaussian_low_rank.objective_sym(Z, sigma, lmbda,
                                                     a_chol, L)

    assert_almost_equal(J_opt, J_chol, delta=2.)