Example #1
def objective(X, Y, sigma, lmbda, alpha, K=None, K_XY=None, b=None, C=None):
    # restrict shape
    #if X.shape[0] > 100:
    #    ind = np.random.permutation(range(X.shape[0]))[:100]
    #    X = X[ind]
    if X.shape[0] != Y.shape[0]:
        Y = np.copy(X)
    if K_XY is None:
        K_XY = gaussian_kernel(X, Y, sigma=sigma)
    
    if K is None and lmbda > 0:
        if X is Y:
            K = K_XY
        else:
            K = gaussian_kernel(X, sigma=sigma)
    
    if b is None or C is None:
        b, C = compute_b_and_C(X, Y, K_XY, sigma)
    
    NX = len(X)
    first = 2. / (NX * sigma) * alpha.dot(b)
    if lmbda > 0:
        second = 2. / (NX * sigma ** 2) * \
            alpha.dot((C + (K + np.eye(len(C))) * lmbda).dot(alpha))
    else:
        second = 2. / (NX * sigma ** 2) * alpha.dot(C.dot(alpha))
    J = first + second
    return J
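Every example on this page calls gaussian_kernel(X, Y=None, sigma=...). Judging from the inline comments further down (e.g. "Kxx = exp(-(x-x)**2 / self.sigma) = 1" in Example #29 and "self.sigma equiv to 2*sigma**2 in the paper" in Example #21), the convention appears to be k(x, y) = exp(-||x - y||^2 / sigma). The following NumPy sketch of that convention is for orientation only and is not the library's implementation:

import numpy as np

def gaussian_kernel_sketch(X, Y=None, sigma=1.0):
    # reference sketch: k(x, y) = exp(-||x - y||^2 / sigma), X of shape (N, D)
    if Y is None:
        Y = X
    sq_dists = np.sum(X ** 2, axis=1)[:, None] + np.sum(Y ** 2, axis=1)[None, :] \
        - 2 * X.dot(Y.T)
    return np.exp(-sq_dists / sigma)

# example: K = gaussian_kernel_sketch(np.random.randn(5, 2), sigma=2.0)  # shape (5, 5)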
Example #3
def test_objective_matches_sym_precomputed_KbC():
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    K = gaussian_kernel(Z, sigma=sigma)
    
    alpha = np.random.randn(len(Z))
    C = develop_gaussian.compute_C_sym(Z, K, sigma)
    b = develop_gaussian.compute_b_sym(Z, K, sigma)
    
    K = gaussian_kernel(Z, sigma=sigma)
    J_sym = develop_gaussian.objective_sym(Z, sigma, lmbda, alpha, K, b, C)
    J = gaussian.objective(Z, Z, sigma, lmbda, alpha, K_XY=K, b=b, C=C)
    
    assert_equal(J, J_sym)
Example #4
def test_compute_C_run_asym():
    sigma = 1.
    X = np.random.randn(100, 2)
    Y = np.random.randn(100, 2)

    K_XY = gaussian_kernel(X, Y, sigma=sigma)
    _ = gaussian.compute_C(X, Y, K_XY, sigma=sigma)
Example #6
def test_incomplete_cholesky_1():
    X = np.arange(9.0).reshape(3, 3)
    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=200.)
    temp = incomplete_cholesky(X, kernel, eta=0.8, power=2)
    R, K_chol, I, W = (temp["R"], temp["K_chol"], temp["I"], temp["W"])
    K = kernel(X)

    assert_equal(len(I), 2)
    assert_equal(I[0], 0)
    assert_equal(I[1], 2)

    assert_equal(K_chol.shape, (len(I), len(I)))
    for i in range(len(I)):
        assert_equal(K_chol[i, i], K[I[i], I[i]])

    assert_equal(R.shape, (len(I), len(X)))
    assert_almost_equal(R[0, 0], 1.000000000000000)
    assert_almost_equal(R[0, 1], 0.763379494336853)
    assert_almost_equal(R[0, 2], 0.339595525644939)
    assert_almost_equal(R[1, 0], 0)
    assert_almost_equal(R[1, 1], 0.535992421608228)
    assert_almost_equal(R[1, 2], 0.940571570355992)

    assert_equal(W.shape, (len(I), len(X)))
    assert_almost_equal(W[0, 0], 1.000000000000000)
    assert_almost_equal(W[0, 1], 0.569858199525808)
    assert_almost_equal(W[0, 2], 0)
    assert_almost_equal(W[1, 0], 0)
    assert_almost_equal(W[1, 1], 0.569858199525808)
    assert_almost_equal(W[1, 2], 1)
Example #7
def test_incomplete_cholesky_check_given_rank():
    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=20.)
    X = np.random.randn(300, 10)
    eta = 5
    K_chol = incomplete_cholesky(X, kernel, eta=eta)["K_chol"]

    assert_equal(K_chol.shape[0], eta)
Example #8
def test_incomplete_cholesky_2():
    X = np.arange(9.0).reshape(3, 3)
    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=8.)
    temp = incomplete_cholesky(X, kernel, eta=0.999)
    R, K_chol, I, W = (temp["R"], temp["K_chol"], temp["I"], temp["W"])
    K = kernel(X)

    assert_equal(len(I), 2)
    assert_equal(I[0], 0)
    assert_equal(I[1], 2)

    assert_equal(K_chol.shape, (len(I), len(I)))
    for i in range(len(I)):
        assert_equal(K_chol[i, i], K[I[i], I[i]])

    assert_equal(R.shape, (len(I), len(X)))
    assert_almost_equal(R[0, 0], 1.000000000000000)
    assert_almost_equal(R[0, 1], 0.034218118311666)
    assert_almost_equal(R[0, 2], 0.000001370959086)
    assert_almost_equal(R[1, 0], 0)
    assert_almost_equal(R[1, 1], 0.034218071400058)
    assert_almost_equal(R[1, 2], 0.999999999999060)

    assert_equal(W.shape, (len(I), len(X)))
    assert_almost_equal(W[0, 0], 1.000000000000000)
    assert_almost_equal(W[0, 1], 0.034218071400090)
    assert_almost_equal(W[0, 2], 0)
    assert_almost_equal(W[1, 0], 0)
    assert_almost_equal(W[1, 1], 0.034218071400090)
    assert_almost_equal(W[1, 2], 1)
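The incomplete-Cholesky examples share one call pattern: incomplete_cholesky(X, kernel, eta=...) returns a dict where "I" holds the pivot indices, "R" a factor with K approximately equal to R.T.dot(R), "K_chol" the kernel matrix restricted to the pivots, "W" weights with K approximately equal to W.T.dot(K_chol).dot(W), and "nu" the quantities used by incomplete_cholesky_new_point(s) to extend the factorisation to unseen points (Examples #27 and #36). An integer eta >= 1 appears to fix the returned rank (Example #7), while eta < 1 acts as an accuracy threshold. A minimal usage sketch along the lines of the tests, assuming gaussian_kernel and incomplete_cholesky are imported as they are in the tests (module paths omitted):

import numpy as np
# gaussian_kernel and incomplete_cholesky assumed imported as in the tests on this page

sigma = 200.
X = np.random.randn(500, 10)
kernel = lambda A, B=None: gaussian_kernel(A, B, sigma=sigma)

temp = incomplete_cholesky(X, kernel, eta=0.001)      # eta < 1: accuracy threshold
R, I = temp["R"], temp["I"]                           # R has shape (rank, len(X))
err = np.linalg.norm(kernel(X) - R.T.dot(R))          # low-rank reconstruction error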
 def second_order_grad(self, x):
     # dimension-wise second derivative of log sum_i k(x, x_i)
     numer = np.sum(gaussian_kernel_dx_dx(x, self.X, sigma=self.bandwidth), axis=0)
     denom = np.sum(gaussian_kernel(x[None, :], self.X, sigma=self.bandwidth), axis=-1)
     g2 = numer / denom
     g2 -= self.grad(x) ** 2
     return g2
Example #12
def test_objective_sym_against_naive():
    sigma = 1.
    D = 2
    N = 10
    Z = np.random.randn(N, D)
    
    K = gaussian_kernel(Z, sigma=sigma)
    
    num_trials = 10
    for _ in range(num_trials):
        alpha = np.random.randn(N)
        
        J_naive_a = 0
        for d in range(D):
            for i in range(N):
                for j in range(N):
                    J_naive_a += alpha[i] * K[i, j] * \
                                (-1 + 2. / sigma * ((Z[i][d] - Z[j][d]) ** 2))
        J_naive_a *= (2. / (N * sigma))
        
        J_naive_b = 0
        for d in range(D):
            for i in range(N):
                temp = 0
                for j in range(N):
                    temp += alpha[j] * (Z[j, d] - Z[i, d]) * K[i, j]
                J_naive_b += (temp ** 2)
        J_naive_b *= (2. / (N * (sigma ** 2)))
        
        J_naive = J_naive_a + J_naive_b
        
        # compare to unregularised objective
        lmbda = 0.
        J = develop_gaussian.objective_sym(Z, sigma, lmbda, alpha, K)
        assert_close(J_naive, J)
def test_compute_C_matches_sym():
    sigma = 1.
    Z = np.random.randn(10, 2)

    K = gaussian_kernel(Z, sigma=sigma)
    C_sym = develop_gaussian.compute_C_sym(Z, K, sigma=sigma)
    C = gaussian.compute_C(Z, Z, K, sigma=sigma)
    assert_allclose(C, C_sym)
Example #16
def incomplete_cholesky_new_point_gaussian(X,
                                           x,
                                           sigma,
                                           I=None,
                                           R=None,
                                           nu=None):
    kernel = lambda X, Y: gaussian_kernel(X, Y, sigma=sigma)
    return incomplete_cholesky_new_point(X, x, kernel, I, R, nu)
Example #17
def test_compute_b_matches_sym():
    sigma = 1.
    Z = np.random.randn(10, 2)
    
    K = gaussian_kernel(Z, sigma=sigma)
    b = develop_gaussian.compute_b_sym(Z, K, sigma=sigma)
    b_sym = gaussian.compute_b(Z, Z, K, sigma=sigma)
    assert_allclose(b, b_sym)
Example #20
 def fit_wrapper_(self):
     self.K = gaussian_kernel(self.X, sigma=self.sigma)
     return fit(X=self.X,
                Y=self.X,
                sigma=self.sigma,
                lmbda=self.lmbda,
                K=self.K,
                reg_f_norm=self.reg_f_norm,
                reg_alpha_norm=self.reg_alpha_norm)
Example #21
    def fit_wrapper_(self):
        K = gaussian_kernel(self.X, sigma=self.sigma)	 # shape (K, K)
        self.K_inv = np.linalg.inv(K + self.lmbda * np.eye(K.shape[0]))	# shape (K, K)
        # here self.sigma equiv to 2*sigma**2 in the paper
        sumK = np.sum(K, axis=1)[:, np.newaxis]	# shape (K, 1)
        self.X_grad = -2 / self.sigma * (-self.X + np.dot(self.K_inv, self.X * sumK))

        # also fit alpha, but not used for gradients
        self.alpha = fit(self.X, self.X, self.sigma, self.lmbda, K)
def test_compute_b_sym_matches_full():
    sigma = 1.
    Z = np.random.randn(100, 2)
    low_rank_dim = int(len(Z) * .9)
    K = gaussian_kernel(Z, sigma=sigma)
    R = incomplete_cholesky_gaussian(Z, sigma, eta=low_rank_dim)["R"]
    
    x = develop_gaussian.compute_b_sym(Z, K, sigma)
    y = develop_gaussian_low_rank.compute_b_sym(Z, R.T, sigma)
    assert_allclose(x, y, atol=5e-1)
Example #23
def test_score_matching_objective_matches_sym():
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    
    K = gaussian_kernel(Z, sigma=sigma)
    J_sym = develop_gaussian.fit_sym(Z, sigma, lmbda, K)
    J = gaussian.fit(Z, Z, sigma, lmbda, K)
    
    assert_allclose(J, J_sym)
Example #27
def test_incomplete_cholesky_new_point():
    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=200.)
    X = np.random.randn(1000, 10)
    low_rank_dim = 15
    temp = incomplete_cholesky(X, kernel, eta=low_rank_dim)
    R, I, nu = (temp["R"], temp["I"], temp["nu"])

    # construct train-train kernel matrix approximation using one by one calls
    for i in range(low_rank_dim):
        r = incomplete_cholesky_new_point(X, X[i], kernel, I, R, nu)
        assert_allclose(r, R[:, i], atol=1e-1)
Example #28
def fit(X, Y, sigma, lmbda, K=None):
    # compute kernel matrix if needed
    if K is None:
        K = gaussian_kernel(X, Y, sigma=sigma)

    b = compute_b(X, Y, K, sigma)
    C = compute_C(X, Y, K, sigma)

    # solve regularised linear system
    a = -sigma / 2.0 * np.linalg.solve(C + (K + np.eye(len(C))) * lmbda, b)

    return a
Example #29
 def grad(self, x):
     assert_array_shape(x, ndim=1, dims={0: self.D})
     # now x is of shape (D,)
     # assume M datapoints in x
     Kxx = 1	# should be a scalar: Kxx = exp(-(x-x)**2 / self.sigma) = 1
     KxX = gaussian_kernel(x[np.newaxis, :], self.X, sigma=self.sigma)	 # shape (1, K)
     xX_grad = gaussian_kernel_grad(x, self.X, self.sigma)	# should be shape (K, D)
     tmp = np.dot(KxX, self.K_inv)	# should be of shape (1, K)
     A = Kxx + self.lmbda - np.sum(tmp * KxX)	# should be a scalar
     B = np.dot(KxX, self.X_grad) - np.dot(tmp + 1, xX_grad)		# shape (1, D) 
     gradient = -B[0] / A	# shape (D,)
     return gradient
Example #30
def objective(X, Y, sigma, lmbda, alpha, K=None, K_XY=None, b=None, C=None):
    if K_XY is None:
        K_XY = gaussian_kernel(X, Y, sigma=sigma)

    if K is None and lmbda > 0:
        if X is Y:
            K = K_XY
        else:
            K = gaussian_kernel(X, sigma=sigma)

    if b is None:
        b = compute_b(X, Y, K_XY, sigma)

    if C is None:
        C = compute_C(X, Y, K_XY, sigma)

    NX = len(X)
    first = 2. / (NX * sigma) * alpha.dot(b)
    second = 2. / (NX * sigma**2) * alpha.dot((C).dot(alpha))
    J = first + second
    return J
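Example #28 (fit) and this objective form the round trip exercised by the tests: solve the regularised linear system for alpha, then evaluate the quadratic score-matching objective at that alpha. A sketch of the pattern, assuming fit and objective are imported from the same module as the tests do (the tests call them as gaussian.fit and gaussian.objective):

import numpy as np
# gaussian_kernel, fit (Example #28) and objective (above) assumed imported as in the tests

sigma, lmbda = 1., 1.
Z = np.random.randn(100, 2)
K = gaussian_kernel(Z, sigma=sigma)                   # symmetric case: X = Y = Z

alpha = fit(Z, Z, sigma, lmbda, K)                    # regularised solve, Example #28
J = objective(Z, Z, sigma, lmbda, alpha, K_XY=K)      # objective value at the fitted alpha

Note that this variant of objective drops the lmbda * (K + I) regulariser from the quadratic term; the variant in Example #53 below adds it back when lmbda > 0, and the optimality check in Example #45 uses the regularised symmetric variant (objective_sym, Example #58).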
Example #31
def test_compute_b_sym_against_paper():
    sigma = 1.
    D = 1
    Z = np.random.randn(1, D)
    K = gaussian_kernel(Z, sigma=sigma)
    b = develop_gaussian.compute_b_sym(Z, K, sigma)
    
    # compute by hand: for a single point the bracketed terms cancel, leaving -k
    x = Z[0]
    k = K[0, 0]
    b_paper = 2. / sigma * (k * (x ** 2) + (x ** 2) * k - 2 * x * k * x) - k
    
    assert_equal(b, b_paper)
Example #33
def test_compute_C_sym_against_paper():
    sigma = 1.
    D = 1
    Z = np.random.randn(1, D)
    K = gaussian_kernel(Z, sigma=sigma)
    C = develop_gaussian.compute_C_sym(Z, K, sigma)
    
    # compute by hand: for a single point both factors are zero, so C is zero
    x = Z[0]
    k = K[0, 0]
    C_paper = (x * k - k * x) * (k * x - x * k)
    
    assert_equal(C, C_paper)
Example #35
def fit(X, Y, sigma, lmbda, K=None):

    # compute kernel matrix if needed
    if K is None:
        K = gaussian_kernel(X, Y, sigma=sigma)

    # compute helper matrices
    b, C = compute_b_and_C(X, Y, K, sigma)

    # solve regularised linear system
    a = np.linalg.solve(C + np.eye(len(C)) * lmbda, b)

    return a
Example #36
def test_incomplete_cholesky_asymmetric():
    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=1.)
    X = np.random.randn(1000, 10)
    Y = np.random.randn(100, 10)

    low_rank_dim = int(len(X) * 0.8)
    temp = incomplete_cholesky(X, kernel, eta=low_rank_dim)
    R, I, nu = (temp["R"], temp["I"], temp["nu"])

    # construct train-train kernel matrix approximation using one by one calls
    R_test = incomplete_cholesky_new_points(X, Y, kernel, I, R, nu)

    assert_allclose(kernel(X, Y), R.T.dot(R_test), atol=10e-1)
Example #37
def test_gaussian_kernel_theano_result_equals_manual():
    if not theano_available:
        raise SkipTest("Theano not available")

    D = 3
    x = np.random.randn(D)
    y = np.random.randn(D)
    sigma = 2.

    k = gaussian_kernel_theano(x, y, sigma)
    k_manual = gaussian_kernel(x[np.newaxis, :], y[np.newaxis, :], sigma)[0, 0]

    assert_almost_equal(k, k_manual)
 def grad(self, x):
     if x.ndim == 1:
         # gradient of log sum_i k(x, x_i): kernel gradients over kernel values
         numer = np.sum(gaussian_kernel_grad(x, self.X, sigma=self.bandwidth), axis=0)
         denom = np.sum(gaussian_kernel(x[None, :], self.X, sigma=self.bandwidth), axis=-1)
         return numer / denom
     else:
         # apply row-wise for a batch of points
         return np.asarray([self.grad(x[i]) for i in range(x.shape[0])])
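The two methods above (second_order_grad and grad) evaluate derivatives of the log of a Gaussian kernel density estimate: grad log p(x) = sum_i grad_x k(x, x_i) / sum_i k(x, x_i), and the second-order version subtracts the squared gradient to obtain the dimension-wise second derivative. A self-contained NumPy sketch of the first-order quantity, using the k(x, y) = exp(-||x - y||^2 / sigma) convention assumed earlier (not the library's gaussian_kernel_grad):

import numpy as np

def log_kde_grad_sketch(x, X, sigma):
    # d/dx log sum_i k(x, x_i) with k(x, y) = exp(-||x - y||^2 / sigma)
    diffs = X - x[None, :]                            # (N, D): x_i - x
    k = np.exp(-np.sum(diffs ** 2, axis=1) / sigma)   # (N,) kernel values
    grads = 2. / sigma * diffs * k[:, None]           # (N, D): gradient of each kernel w.r.t. x
    return np.sum(grads, axis=0) / np.sum(k)

# example: g = log_kde_grad_sketch(np.zeros(2), np.random.randn(50, 2), sigma=2.)  # shape (2,)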
def test_apply_C_left_sym_matches_full():
    sigma = 1.
    N = 10
    Z = np.random.randn(N, 2)
    K = gaussian_kernel(Z, sigma=sigma)
    R = incomplete_cholesky_gaussian(Z, sigma, eta=0.1)["R"]
    
    v = np.random.randn(Z.shape[0])
    lmbda = 1.
    
    x = (develop_gaussian.compute_C_sym(Z, K, sigma) + lmbda * (K + np.eye(len(K)))).dot(v)
    y = develop_gaussian_low_rank.apply_left_C_sym(v, Z, R.T, lmbda)
    assert_allclose(x, y, atol=2e-1, rtol=2e-1)
Example #44
def test_incomplete_cholesky_3():
    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=200.)
    X = np.random.randn(3000, 10)
    temp = incomplete_cholesky(X, kernel, eta=0.001)
    R, K_chol, I, W = (temp["R"], temp["K_chol"], temp["I"], temp["W"])
    K = kernel(X)

    assert_equal(K_chol.shape, (len(I), (len(I))))
    assert_equal(R.shape, (len(I), (len(X))))
    assert_equal(W.shape, (len(I), (len(X))))

    assert_less_equal(np.linalg.norm(K - R.T.dot(R)), .6)
    assert_less_equal(np.linalg.norm(K - W.T.dot(K_chol.dot(W))), .6)
Example #45
def test_objective_sym_optimum():
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    
    K = gaussian_kernel(Z, sigma=sigma)
    a = develop_gaussian.fit_sym(Z, sigma, lmbda, K)
    J_opt = develop_gaussian.objective_sym(Z, sigma, lmbda, a, K)
    
    for _ in range(10):
        a_random = np.random.randn(len(Z))
        J = develop_gaussian.objective_sym(Z, sigma, lmbda, a_random, K)
        assert J >= J_opt
Example #46
def test_objective_sym_same_as_from_estimation():
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    
    K = gaussian_kernel(Z, sigma=sigma)
    a = develop_gaussian.fit_sym(Z, sigma, lmbda, K)
    C = develop_gaussian.compute_C_sym(Z, K, sigma)
    b = develop_gaussian.compute_b_sym(Z, K, sigma)
    J = develop_gaussian.objective_sym(Z, sigma, lmbda, a, K, b, C)
    
    J2 = develop_gaussian.objective_sym(Z, sigma, lmbda, a, K)
    assert_almost_equal(J, J2)
def test_objective_sym_matches_full():
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    
    K = gaussian_kernel(Z, sigma=sigma)
    a_opt = develop_gaussian.fit_sym(Z, sigma, lmbda, K)
    J_opt = develop_gaussian.objective_sym(Z, sigma, lmbda, a_opt, K)
    
    L = incomplete_cholesky_gaussian(Z, sigma, eta=0.01)["R"].T
    a_opt_chol = develop_gaussian_low_rank.fit_sym(Z, sigma, lmbda, L)
    J_opt_chol = develop_gaussian_low_rank.objective_sym(Z, sigma, lmbda, a_opt_chol, L)
    
    assert_almost_equal(J_opt, J_opt_chol, delta=2.)
Example #51
def test_incomplete_cholesky_new_points_euqals_new_point():
    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=200.)
    X = np.random.randn(1000, 10)
    low_rank_dim = 15
    temp = incomplete_cholesky(X, kernel, eta=low_rank_dim)
    R, I, nu = (temp["R"], temp["I"], temp["nu"])

    R_test_full = incomplete_cholesky_new_points(X, X, kernel, I, R, nu)

    # construct train-train kernel matrix approximation using one by one calls
    R_test = np.zeros(R.shape)
    for i in range(low_rank_dim):
        R_test[:, i] = incomplete_cholesky_new_point(X, X[i], kernel, I, R, nu)
        assert_allclose(R_test[:, i], R_test_full[:, i], atol=0.001)
def test_compute_b_matches_full():
    sigma = 1.
    X = np.random.randn(100, 2)
    Y = np.random.randn(50, 2)
    
    low_rank_dim = int(len(X) * 0.9)
    kernel = lambda X, Y: gaussian_kernel(X, Y, sigma=sigma)
    K_XY = kernel(X, Y)
    temp = incomplete_cholesky(X, kernel, eta=low_rank_dim)
    I, R, nu = (temp["I"], temp["R"], temp["nu"])
    R_test = incomplete_cholesky_new_points(X, Y, kernel, I, R, nu)
    
    x = gaussian.compute_b(X, Y, K_XY, sigma)
    y = gaussian_low_rank.compute_b(X, Y, R.T, R_test.T, sigma)
    assert_allclose(x, y, atol=5e-1)
Example #53
def objective(X, Y, sigma, lmbda, alpha, K=None, K_XY=None, b=None, C=None):
    if K_XY is None:
        K_XY = gaussian_kernel(X, Y, sigma=sigma)

    if K is None and lmbda > 0:
        if X is Y:
            K = K_XY
        else:
            K = gaussian_kernel(X, sigma=sigma)

    if b is None:
        b = compute_b(X, Y, K_XY, sigma)

    if C is None:
        C = compute_C(X, Y, K_XY, sigma)

    NX = len(X)
    first = 2.0 / (NX * sigma) * alpha.dot(b)
    if lmbda > 0:
        second = 2.0 / (NX * sigma ** 2) * alpha.dot((C + (K + np.eye(len(C))) * lmbda).dot(alpha))
    else:
        second = 2.0 / (NX * sigma ** 2) * alpha.dot((C).dot(alpha))
    J = first + second
    return J
Example #55
def fit_sym(Z, sigma, lmbda, K=None, b=None, C=None):
    # compute quantities
    if K is None:
        K = gaussian_kernel(Z, sigma=sigma)

    if b is None:
        b = compute_b_sym(Z, K, sigma)

    if C is None:
        C = compute_C_sym(Z, K, sigma)

    # solve regularised linear system
    a = -sigma / 2. * np.linalg.solve(C + (K + np.eye(len(C))) * lmbda, b)

    return a
def test_fit_matches_sym():
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    low_rank_dim = int(len(Z) * .9)
    
    kernel = lambda X, Y: gaussian_kernel(X, Y, sigma=sigma)
    
    temp = incomplete_cholesky(Z, kernel, eta=low_rank_dim)
    I, R, nu = (temp["I"], temp["R"], temp["nu"])
    R_test = incomplete_cholesky_new_points(Z, Z, kernel, I, R, nu)
    
    a = gaussian_low_rank.fit(Z, Z, sigma, lmbda, R.T, R_test.T)
    a_sym = develop_gaussian_low_rank.fit_sym(Z, sigma, lmbda, R.T)
    
    assert_allclose(a, a_sym)
def apply_C_matches_sym():
    sigma = 1.
    N_X = 100
    X = np.random.randn(N_X, 2)
    
    kernel = lambda X, Y: gaussian_kernel(X, Y, sigma=sigma)
    temp = incomplete_cholesky(X, kernel, eta=0.1)
    I, R, nu = (temp["I"], temp["R"], temp["nu"])
    
    R_test = incomplete_cholesky_new_points(X, X, kernel, I, R, nu)
    
    v = np.random.randn(N_X)
    lmbda = 1.
    
    x = gaussian_low_rank.apply_left_C(v, X, X, R.T, R_test.T, lmbda)
    y = develop_gaussian_low_rank.apply_left_C_sym(v, X, R.T, lmbda)
    assert_allclose(x, y)
Example #58
def objective_sym(Z, sigma, lmbda, alpha, K=None, b=None, C=None):
    if K is None and ((b is None or C is None) or lmbda > 0):
        K = gaussian_kernel(Z, sigma=sigma)
    
    if C is None:
        C = compute_C_sym(Z, K, sigma)
    
    if b is None:
        b = compute_b_sym(Z, K, sigma)
    
    N = len(Z)
    first = 2. / (N * sigma) * alpha.dot(b)
    second = 2. / (N * sigma ** 2) * \
        alpha.dot((C + (K + np.eye(len(C))) * lmbda).dot(alpha))
    J = first + second
    return J
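objective_sym is quadratic in alpha: J(alpha) = 2/(N*sigma) * alpha.dot(b) + 2/(N*sigma**2) * alpha.dot(A).dot(alpha) with A = C + lmbda*(K + I). Setting the gradient to zero gives alpha* = -sigma/2 * A^{-1} b, which is exactly the solve performed by fit_sym in Example #55. A self-contained NumPy check of that algebra on a random symmetric positive-definite A (no library code involved):

import numpy as np

N, sigma = 50, 1.5
B = np.random.randn(N, N)
A = B.dot(B.T) + N * np.eye(N)                        # stand-in for C + lmbda * (K + eye)
b = np.random.randn(N)

def J(alpha):
    return 2. / (N * sigma) * alpha.dot(b) + 2. / (N * sigma ** 2) * alpha.dot(A.dot(alpha))

alpha_star = -sigma / 2. * np.linalg.solve(A, b)      # same form as fit_sym, Example #55
assert all(J(alpha_star) <= J(np.random.randn(N)) for _ in range(10))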
Example #59
def test_compute_C_against_initial_notebook():
    D = 2
    sigma = 1.
    Z = np.random.randn(100, D)
    K = gaussian_kernel(Z, sigma=sigma)
    
    # build matrix expressions from notes
    m = Z.shape[0]
    D = Z.shape[1]
    
    C = np.zeros((m, m))
    for l in np.arange(D):
        x_l = Z[:, l]
        C += (np.diag(x_l).dot(K) - K.dot(np.diag(x_l))).dot(K.dot(np.diag(x_l)) - np.diag(x_l).dot(K))
    
    C_test = develop_gaussian.compute_C_sym(Z, K, sigma)
    
    assert_allclose(C, C_test)
def test_objective_matches_sym():
    sigma = 1.
    lmbda = 1.
    Z = np.random.randn(100, 2)
    
    kernel = lambda X, Y: gaussian_kernel(X, Y, sigma=sigma)
    alpha = np.random.randn(len(Z))
    
    temp = incomplete_cholesky(Z, kernel, eta=0.1)
    I, R, nu = (temp["I"], temp["R"], temp["nu"])
    
    R_test = incomplete_cholesky_new_points(Z, Z, kernel, I, R, nu)
    
    b = gaussian_low_rank.compute_b(Z, Z, R.T, R_test.T, sigma)
    
    J_sym = develop_gaussian_low_rank.objective_sym(Z, sigma, lmbda, alpha, R.T, b)
    J = gaussian_low_rank.objective(Z, Z, sigma, lmbda, alpha, R.T, R_test.T, b)
    
    assert_close(J, J_sym)