Example #1
def test_gaussian_kernel_dx_component_equals_grad():
    D = 4
    x = np.random.randn(D)
    y = np.random.randn(D)
    sigma = 0.5

    grad = gaussian_kernel_grad(x, np.atleast_2d(y), sigma)[0]
    for i in range(D):
        dxi = gaussian_kernel_dx_component(x, y, i, sigma)
        assert_allclose(grad[i], dxi)
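For reference, a minimal NumPy sketch of the two functions this test exercises, assuming the convention k(x, y) = exp(-||x - y||^2 / sigma); the library's actual implementations may differ in broadcasting details:

import numpy as np

def gaussian_kernel(X, Y, sigma):
    # pairwise Gaussian kernel; X is (N, D), Y is (M, D); returns (N, M)
    sq_dists = (np.sum(X ** 2, axis=1)[:, None]
                + np.sum(Y ** 2, axis=1)[None, :]
                - 2 * X.dot(Y.T))
    return np.exp(-sq_dists / sigma)

def gaussian_kernel_grad(x, Y, sigma):
    # gradient of k(x, y) w.r.t. x for a single x of shape (D,),
    # evaluated against each row of Y; returns (M, D)
    k = gaussian_kernel(x[None, :], Y, sigma)[0]  # shape (M,)
    return 2.0 / sigma * (Y - x[None, :]) * k[:, None]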
Example #2
def grad(self, x):
    assert_array_shape(x, ndim=1, dims={0: self.D})
    # x has shape (D,); self.X holds the K datapoints
    Kxx = 1  # scalar: k(x, x) = exp(-||x - x||^2 / self.sigma) = 1
    KxX = gaussian_kernel(x[np.newaxis, :], self.X, sigma=self.sigma)  # shape (1, K)
    xX_grad = gaussian_kernel_grad(x, self.X, self.sigma)  # shape (K, D)
    tmp = np.dot(KxX, self.K_inv)  # shape (1, K)
    A = Kxx + self.lmbda - np.sum(tmp * KxX)  # scalar
    B = np.dot(KxX, self.X_grad) - np.dot(tmp + 1, xX_grad)  # shape (1, D)
    gradient = -B[0] / A  # shape (D,)
    return gradient
Example #3
def grad(self, x):
    if x.ndim == 1:
        # single point of shape (D,): ratio of summed kernel gradients to the kernel sum
        num = np.sum(gaussian_kernel_grad(x, self.X, sigma=self.bandwidth), axis=0)
        den = np.sum(gaussian_kernel(x[None, :], self.X, sigma=self.bandwidth), axis=-1)
        return num / den
    else:
        # batch of points: apply row-wise
        return np.asarray([self.grad(x[i]) for i in range(x.shape[0])])
Example #4
def log_pdf_naive(x, X, sigma, alpha, beta):
    N, D = X.shape

    xi = 0
    betasum = 0
    for a in range(N):
        x_a = np.atleast_2d(X[a, :])
        gradient_x_xa = np.squeeze(gaussian_kernel_grad(x, x_a, sigma))
        xi_grad = np.squeeze(gaussian_kernel_dx_dx(x, x_a, sigma))
        for i in range(D):
            xi += xi_grad[i] / N  # average of the diagonal second derivatives over the data
            betasum += gradient_x_xa[i] * beta[a, i]  # inner product of the kernel gradient with beta[a]

    return alpha * xi + betasum
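Example #4 also calls gaussian_kernel_dx_dx, the diagonal of the second derivatives d^2 k / dx_i^2. Under the same assumed kernel convention as the sketch after Example #1, one possible implementation:

def gaussian_kernel_dx_dx(x, Y, sigma):
    # d^2 k(x, y) / dx_i^2 for a single x against each row of Y; returns (M, D)
    k = gaussian_kernel(x[None, :], Y, sigma)[0]  # shape (M,)
    first = 2.0 / sigma * (Y - x[None, :])        # dk/dx_i divided by k, elementwise
    return (first ** 2 - 2.0 / sigma) * k[:, None]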
Example #5
def test_gaussian_kernel_grad_theano_result_equals_manual():
    if not theano_available:
        raise SkipTest("Theano not available")
    
    D = 3
    x = np.random.randn(D)
    y = np.random.randn(D)
    sigma = 2.
    
    grad = gaussian_kernel_grad_theano(x, y, sigma)
    grad_manual = gaussian_kernel_grad(x, y[np.newaxis, :], sigma)[0]
    print(grad_manual)
    print(grad)
    
    assert_allclose(grad, grad_manual)
Example #6
def log_pdf(x, basis, sigma, alpha, beta):
    m, D = basis.shape
    assert_array_shape(x, ndim=1, dims={0: D})

    SE_dx_dx_l = lambda x, y: gaussian_kernel_dx_dx(x, y.reshape(1, -1), sigma)
    SE_dx_l = lambda x, y: gaussian_kernel_grad(x, y.reshape(1, -1), sigma)

    xi = 0
    betasum = 0
    for a in range(m):
        x_a = basis[a]
        xi += np.sum(SE_dx_dx_l(x, x_a)) / m
        gradient_x_xa = np.squeeze(SE_dx_l(x, x_a))
        betasum += np.dot(gradient_x_xa, beta[a, :])

    return float(alpha * xi + betasum)
Example #7
    def grad(self, x):
        assert_array_shape(x, ndim=1, dims={0: self.D})

        k = gaussian_kernel_grad(x, self.X, self.sigma)
        return np.dot(self.alpha, k)
Example #8
def grad(self, x):
    # ratio of summed kernel gradients to the kernel sum at x
    num = np.sum(gaussian_kernel_grad(x, self.X, sigma=self.bandwidth), axis=0)
    den = np.sum(gaussian_kernel(x[None, :], self.X, sigma=self.bandwidth), axis=-1)
    return num / den
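Examples #3 and #8 both compute the gradient of the log of a Gaussian kernel density estimate: grad log sum_a k(x, x_a) = sum_a grad_x k(x, x_a) / sum_a k(x, x_a), and the KDE normalizer is constant in x, so it drops out. A finite-difference sanity check built on the sketch functions above (standalone illustration, not library code):

rng = np.random.RandomState(0)
X = rng.randn(10, 3)
x = rng.randn(3)
sigma, eps = 1.5, 1e-6

def log_kde(z):
    # log of the unnormalized KDE at z
    return np.log(np.sum(gaussian_kernel(z[None, :], X, sigma)))

g = (np.sum(gaussian_kernel_grad(x, X, sigma), axis=0)
     / np.sum(gaussian_kernel(x[None, :], X, sigma)))
e0 = np.eye(3)[0]
fd = (log_kde(x + eps * e0) - log_kde(x - eps * e0)) / (2 * eps)
assert np.allclose(fd, g[0], atol=1e-5)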