Example #1
def second_order_grad(self, x):
    # Second derivative of the log kernel density estimate in each dimension:
    # (sum_a d^2 k(x, x_a) / dx_i^2) / (sum_a k(x, x_a)), minus the squared
    # first-order gradient.
    g2 = np.sum(gaussian_kernel_dx_dx(x, self.X, sigma=self.bandwidth), axis=0) \
        / np.sum(gaussian_kernel(x[None, :], self.X, sigma=self.bandwidth), axis=-1)
    g2 -= self.grad(x) ** 2
    return g2
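
These examples lean on kernel helpers (gaussian_kernel, gaussian_kernel_grad, gaussian_kernel_dx_dx) whose definitions are not shown. Below is a minimal NumPy sketch of what they presumably compute, assuming the convention k(x, y) = exp(-||x - y||^2 / sigma); this is an illustration consistent with the tests that follow, not the library's actual implementation.

import numpy as np

def gaussian_kernel(X, Y, sigma):
    # Pairwise k(x, y) = exp(-||x - y||^2 / sigma); returns shape (len(X), len(Y)).
    sq_dists = np.sum((X[:, None, :] - Y[None, :, :]) ** 2, axis=-1)
    return np.exp(-sq_dists / sigma)

def gaussian_kernel_grad(x, Y, sigma):
    # dk/dx_i = -2 (x_i - y_i) / sigma * k(x, y); one row per y, shape (len(Y), D).
    diff = x[None, :] - Y
    k = np.exp(-np.sum(diff ** 2, axis=1) / sigma)
    return -2.0 / sigma * diff * k[:, None]

def gaussian_kernel_dx_dx(x, Y, sigma):
    # d^2 k / dx_i^2 = (4 (x_i - y_i)^2 / sigma^2 - 2 / sigma) * k(x, y);
    # one row per y, shape (len(Y), D).
    diff = x[None, :] - Y
    k = np.exp(-np.sum(diff ** 2, axis=1) / sigma)
    return (4.0 * diff ** 2 / sigma ** 2 - 2.0 / sigma) * k[:, None]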
Example #2
def test_gaussian_kernel_dx_dx_component_equals_gaussian_kernel_dx_dx():
    D = 4
    x = np.random.randn(D)
    y = np.random.randn(D)
    sigma = 0.5

    dx_dx = gaussian_kernel_dx_dx(x, np.atleast_2d(y), sigma)[0]
    for i in range(D):
        dxi = gaussian_kernel_dx_dx_component(x, y, i, sigma)
        assert_allclose(dx_dx[i], dxi)
Example #3
def test_gaussian_kernel_dx_dx_equals_SE_dx_dx():
    D = 4
    x = np.random.randn(D)
    Y = np.random.randn(1, D)
    sigma = 0.5

    implementation = gaussian_kernel_dx_dx(x, Y, sigma)
    # the SE reference is parametrized by lengthscale l, with sigma = 2 * l**2
    reference = SE_dx_dx(x.reshape(-1, 1), Y.T, l=np.sqrt(sigma / 2.0))

    assert_close(implementation, reference.T)
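
SE_dx_dx is the independent reference the test checks against. A hypothetical sketch, assuming it implements the second derivatives of the squared-exponential kernel k(x, y) = exp(-||x - y||^2 / (2 l^2)) with the column-vector convention used above (x is D x 1, Y holds one column per point); with sigma = 2 * l**2 this matches the gaussian_kernel_dx_dx sketch exactly.

import numpy as np

def SE_dx_dx(x, Y, l):
    # d^2 k / dx_i^2 = ((x_i - y_i)^2 / l**4 - 1 / l**2) * k(x, y);
    # x: (D, 1), Y: (D, N), returns (D, N).
    diff = x - Y
    k = np.exp(-np.sum(diff ** 2, axis=0) / (2.0 * l ** 2))
    return (diff ** 2 / l ** 4 - 1.0 / l ** 2) * k[None, :]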
Example #4
def log_pdf_naive(x, X, sigma, alpha, beta):
    N, D = X.shape

    # xi accumulates the second kernel derivatives, averaged over basis points;
    # betasum accumulates the inner product of the kernel gradients with beta
    xi = 0
    betasum = 0
    for a in range(N):
        x_a = np.atleast_2d(X[a, :])
        gradient_x_xa = np.squeeze(gaussian_kernel_grad(x, x_a, sigma))
        xi_grad = np.squeeze(gaussian_kernel_dx_dx(x, x_a, sigma))
        for i in range(D):
            xi += xi_grad[i] / N
            betasum += gradient_x_xa[i] * beta[a, i]

    return alpha * xi + betasum
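
Since the double loop only accumulates sums, the same value can be computed without Python loops. A hypothetical vectorized equivalent (the name log_pdf_vectorized is mine), assuming gaussian_kernel_grad and gaussian_kernel_dx_dx return (N, D) arrays as in the sketch after Example #1:

def log_pdf_vectorized(x, X, sigma, alpha, beta):
    # xi: second kernel derivatives summed over dimensions, averaged over the N points
    # betasum: elementwise inner product between the kernel gradients and beta
    N = X.shape[0]
    xi = np.sum(gaussian_kernel_dx_dx(x, X, sigma)) / N
    betasum = np.sum(gaussian_kernel_grad(x, X, sigma) * beta)
    return alpha * xi + betasum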
Example #5
def test_gaussian_kernel_dx_dx_multiple_ys():
    D = 4
    N = 3
    x = np.random.randn(D)
    Y = np.random.randn(N, D)
    sigma = 0.5

    implementation = gaussian_kernel_dx_dx(x, Y, sigma)

    for i in range(N):
        reference = SE_dx_dx(x.reshape(-1, 1),
                             Y[i].reshape(-1, 1),
                             l=np.sqrt(sigma / 2.0))

        assert_close(implementation[i], np.squeeze(reference.T))
Example #6
def log_pdf(x, basis, sigma, alpha, beta):
    m, D = basis.shape
    assert_array_shape(x, ndim=1, dims={0: D})

    SE_dx_dx_l = lambda x, y: gaussian_kernel_dx_dx(x, y.reshape(1, -1), sigma)
    SE_dx_l = lambda x, y: gaussian_kernel_grad(x, y.reshape(1, -1), sigma)

    xi = 0
    betasum = 0
    for a in range(m):
        x_a = basis[a]
        xi += np.sum(SE_dx_dx_l(x, x_a)) / m
        gradient_x_xa = np.squeeze(SE_dx_l(x, x_a))
        betasum += np.dot(gradient_x_xa, beta[a, :])

    return float(alpha * xi + betasum)
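
A usage sketch for log_pdf (shapes and values are illustrative only; assert_array_shape is assumed to come from the surrounding library):

import numpy as np

rng = np.random.RandomState(0)
m, D = 5, 2
basis = rng.randn(m, D)   # m basis points in D dimensions
alpha = 0.5               # scalar weight on the xi term
beta = rng.randn(m, D)    # one D-vector of weights per basis point
x = rng.randn(D)          # evaluation point

print(log_pdf(x, basis, sigma=0.5, alpha=alpha, beta=beta))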