Example #1
    def log_likelihood(self, xy):
        assert isinstance(xy, (tuple, np.ndarray))
        A, sigma, D = self.A, self.sigma, self.D_out
        x, y = (xy[:, :-D], xy[:, -D:]) if isinstance(xy, np.ndarray) else xy

        if self.affine:
            A, b = A[:, :-1], A[:, -1]

        sigma_inv, L = inv_psd(sigma, return_chol=True)
        parammat = -1. / 2 * blockarray([[
            A.T.dot(sigma_inv).dot(A), -A.T.dot(sigma_inv)
        ], [-sigma_inv.dot(A), sigma_inv]])

        contract = 'ni,ni->n' if x.ndim == 2 else 'i,i->'
        if isinstance(xy, np.ndarray):
            out = np.einsum(contract, xy.dot(parammat), xy)
        else:
            out = np.einsum(contract, x.dot(parammat[:-D, :-D]), x)
            out += np.einsum(contract, y.dot(parammat[-D:, -D:]), y)
            out += 2 * np.einsum(contract, x.dot(parammat[:-D, -D:]), y)

        out -= D / 2 * np.log(2 * np.pi) + np.log(np.diag(L)).sum()

        if self.affine:
            out += y.dot(sigma_inv).dot(b)
            out -= x.dot(A.T).dot(sigma_inv).dot(b)
            out -= 1. / 2 * b.dot(sigma_inv).dot(b)

        return out
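
All of the snippets in this listing rely on two helpers that are not shown, inv_psd and blockarray. The sketch below is an assumption inferred from how they are used (in particular, inv_psd(sigma, return_chol=True) must return a Cholesky factor whose log-diagonal sums to half the log-determinant), not the original implementations.

    # Minimal stand-ins for the helpers assumed by these snippets; the originals may differ.
    import numpy as np
    from scipy.linalg import cho_solve

    def inv_psd(A, return_chol=False):
        # Invert a symmetric positive-definite matrix via its Cholesky factor.
        # sum(log(diag(L))) equals half the log-determinant of A, which is how
        # the log_likelihood methods in this listing use the returned factor.
        L = np.linalg.cholesky(A)
        Ainv = cho_solve((L, True), np.eye(A.shape[0]))
        return (Ainv, L) if return_chol else Ainv

    def blockarray(blocks):
        # Assemble a dense matrix from a nested list of blocks.
        return np.block(blocks)
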
Example #2
 def _standard_to_natural(nu, S, M, K):
     Kinv = inv_psd(K)
     A = S + M.dot(Kinv).dot(M.T)
     B = M.dot(Kinv)
     C = Kinv
     d = nu
     return np.array([A, B, C, d])
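
A quick sanity check for this mapping is that the inverse transform shown later (Examples #7 to #9) recovers the standard parameters, since B K = M and A - M B^T = S by construction. The self-contained sketch below inlines both directions; the shapes and values are made up purely for the test.

    import numpy as np

    rng = np.random.default_rng(0)

    def random_psd(n):
        X = rng.standard_normal((n, 2 * n))
        return X @ X.T / (2 * n)    # a well-conditioned PSD matrix

    D_out, D_in = 3, 4
    nu = D_out + 2.0
    S, K = random_psd(D_out), random_psd(D_in)
    M = rng.standard_normal((D_out, D_in))

    # forward map, mirroring _standard_to_natural above
    Kinv = np.linalg.inv(K)
    A, B, C, d = S + M @ Kinv @ M.T, M @ Kinv, Kinv, nu

    # inverse map, mirroring _natural_to_standard (Examples #7-#9, without the jitter)
    K2 = np.linalg.inv(C)
    M2 = B @ K2
    S2 = A - M2 @ B.T
    assert d == nu and np.allclose(K2, K) and np.allclose(M2, M) and np.allclose(S2, S)
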
Example #3
 def _standard_to_natural(nu, S, M, K):
     Kinv = inv_psd(K)
     A = S + M.dot(Kinv).dot(M.T)
     B = M.dot(Kinv)
     C = Kinv
     d = nu
     return np.array([A, B, C, d])
Example #4
    def log_likelihood(self, xy):
        assert isinstance(xy, (tuple, np.ndarray))
        A, sigma, D = self.A, self.sigma, self.D_out
        x, y = (xy[:, :-D], xy[:, -D:]) if isinstance(xy, np.ndarray) else xy

        if self.affine:
            A, b = A[:, :-1], A[:, -1]

        sigma_inv, L = inv_psd(sigma, return_chol=True)
        parammat = -1. / 2 * blockarray([
            [A.T.dot(sigma_inv).dot(A), -A.T.dot(sigma_inv)],
            [-sigma_inv.dot(A), sigma_inv]])

        contract = 'ni,ni->n' if x.ndim == 2 else 'i,i->'
        if isinstance(xy, np.ndarray):
            out = np.einsum(contract, xy.dot(parammat), xy)
        else:
            out = np.einsum(contract, x.dot(parammat[:-D, :-D]), x)
            out += np.einsum(contract, y.dot(parammat[-D:, -D:]), y)
            out += 2 * np.einsum(contract, x.dot(parammat[:-D, -D:]), y)

        out -= D / 2 * np.log(2 * np.pi) + np.log(np.diag(L)).sum()

        if self.affine:
            out += y.dot(sigma_inv).dot(b)
            out -= x.dot(A.T).dot(sigma_inv).dot(b)
            out -= 1. / 2 * b.dot(sigma_inv).dot(b)

        return out
Example #5
    def _resample_precision(self, data):
        assert isinstance(data, (list, tuple, np.ndarray))
        if isinstance(data, list):
            return [self._resample_precision(d) for d in data]

        elif isinstance(data, tuple):
            x, y = data

        else:
            x, y = data[:, :-self.D_out], data[:, -self.D_out:]

        assert x.ndim == y.ndim == 2
        assert x.shape[0] == y.shape[0]
        assert x.shape[1] == (self.D_in - 1 if self.affine else self.D_in)
        assert y.shape[1] == self.D_out
        N = x.shape[0]

        # Weed out the NaNs
        bad = np.any(np.isnan(x), axis=1) | np.any(np.isnan(y), axis=1)

        # Compute posterior params of gamma distribution
        a_post = self.nu / 2.0 + self.D_out / 2.0

        r = y - self.predict(x)
        sigma_inv = inv_psd(self.sigma)
        z = sigma_inv.dot(r.T).T
        b_post = self.nu / 2.0 + (r * z).sum(1) / 2.0

        assert np.isscalar(a_post) and b_post.shape == (N,)
        tau = np.nan * np.ones(N)
        tau[~bad] = np.random.gamma(a_post, 1./b_post[~bad])

        return tau
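
This resampling step follows from the scale-mixture view of the Student-t regression model: if tau ~ Gamma(nu/2, rate nu/2) and the residual r | tau ~ N(0, sigma / tau), then the conditional over tau is Gamma((nu + D)/2, rate (nu + r' sigma^{-1} r)/2), which is exactly the a_post and b_post above. A small numerical check of that proportionality, using made-up parameter values:

    import numpy as np
    from scipy.stats import gamma, multivariate_normal

    rng = np.random.default_rng(1)
    D, nu = 2, 4.0
    sigma = np.array([[2.0, 0.3], [0.3, 1.0]])
    r = rng.standard_normal(D)

    # log prior + log likelihood over a grid of tau values
    taus = np.linspace(0.1, 5.0, 50)
    log_joint = gamma.logpdf(taus, nu / 2, scale=2 / nu) \
        + np.array([multivariate_normal.logpdf(r, np.zeros(D), sigma / t) for t in taus])

    # conjugate Gamma posterior, as used in _resample_precision
    a_post = (nu + D) / 2
    b_post = (nu + r @ np.linalg.solve(sigma, r)) / 2
    log_post = gamma.logpdf(taus, a_post, scale=1 / b_post)

    # the two differ only by a constant in tau
    diff = log_joint - log_post
    assert np.allclose(diff, diff[0])
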
Example #6
    def log_likelihood(self, xy):
        assert isinstance(xy, (tuple, np.ndarray))
        sigma, D, nu = self.sigma, self.D_out, self.nu
        x, y = (xy[:, :-D], xy[:, -D:]) if isinstance(xy, np.ndarray) else xy

        sigma_inv, L = inv_psd(sigma, return_chol=True)
        r = y - self.predict(x)
        z = sigma_inv.dot(r.T).T

        out = -0.5 * (nu + D) * np.log(1.0 + (r * z).sum(1) / nu)
        out += gammaln((nu + D) / 2.0) - gammaln(nu / 2.0) - D / 2.0 * np.log(nu) \
            - D / 2.0 * np.log(np.pi) - np.log(np.diag(L)).sum()

        return out
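
This is the standard multivariate Student-t log-density with location predict(x), shape matrix sigma, and nu degrees of freedom. As a cross-check, the sketch below re-evaluates the same formula with a fixed mean in place of predict(x) and compares it against scipy.stats.multivariate_t (available in SciPy 1.6+); every concrete number is made up for the test.

    import numpy as np
    from scipy.special import gammaln
    from scipy.stats import multivariate_t

    rng = np.random.default_rng(2)
    D, nu = 3, 5.0
    sigma = np.diag([1.0, 2.0, 0.5]) + 0.1     # symmetric, diagonally dominant
    mu = rng.standard_normal(D)
    y = rng.standard_normal((10, D))

    L = np.linalg.cholesky(sigma)
    r = y - mu
    z = np.linalg.solve(sigma, r.T).T

    out = -0.5 * (nu + D) * np.log(1.0 + (r * z).sum(1) / nu)
    out += gammaln((nu + D) / 2.0) - gammaln(nu / 2.0) - D / 2.0 * np.log(nu) \
        - D / 2.0 * np.log(np.pi) - np.log(np.diag(L)).sum()

    assert np.allclose(out, multivariate_t(mu, sigma, df=nu).logpdf(y))
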
Example #7
    def _natural_to_standard(natparam):
        A, B, C, d = natparam
        nu = d
        Kinv = C
        K = inv_psd(Kinv)
        M = B.dot(K)
        S = A - B.dot(K).dot(B.T)

        # numerical padding here...
        K += 1e-8 * np.eye(K.shape[0])
        assert np.all(0 < np.linalg.eigvalsh(S))
        assert np.all(0 < np.linalg.eigvalsh(K))

        return nu, S, M, K
Example #8
    def _natural_to_standard(natparam):
        A, B, C, d = natparam
        nu = d
        Kinv = C
        K = inv_psd(Kinv)
        M = B.dot(K)
        S = A - B.dot(K).dot(B.T)

        # numerical padding here...
        K += 1e-8 * np.eye(K.shape[0])
        S += 1e-8 * np.eye(S.shape[0])
        assert np.all(0 < np.linalg.eigvalsh(S))
        assert np.all(0 < np.linalg.eigvalsh(K))

        return nu, S, M, K
Example #9
    def _natural_to_standard(natparam):
        A, B, C, d = natparam   # natparam is roughly (yyT, yxT, xxT, n)
        nu = d
        Kinv = C
        K = inv_psd(Kinv)
        # M = B.dot(K)
        M = np.linalg.solve(Kinv, B.T).T
        # This subtraction seems unstable!
        # It does not necessarily return a PSD matrix
        S = A - M.dot(B.T)

        # numerical padding here...
        K += 1e-8 * np.eye(K.shape[0])
        S += 1e-8 * np.eye(S.shape[0])
        assert np.all(0 < np.linalg.eigvalsh(S))
        assert np.all(0 < np.linalg.eigvalsh(K))

        # standard is degrees of freedom, mean of sigma (ish), mean of A, cov of rows of A
        return nu, S, M, K
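
As the comments in this variant note, recovering S by subtraction can lose positive definiteness in floating point when K (equivalently, the xxT statistics) is ill-conditioned, which is what the fixed 1e-8 padding papers over. A common alternative guard, shown below as a hypothetical helper rather than anything from the original code, is to symmetrize and shift by only as much as the smallest eigenvalue requires:

    import numpy as np

    def project_psd(X, jitter=1e-8):
        # Hypothetical helper: symmetrize, then add just enough diagonal
        # jitter to push the smallest eigenvalue up to at least `jitter`.
        X = (X + X.T) / 2.0
        eigmin = np.linalg.eigvalsh(X).min()
        if eigmin < jitter:
            X = X + (jitter - eigmin) * np.eye(X.shape[0])
        return X

With such a helper, S = project_psd(A - M.dot(B.T)) and K = project_psd(K) could replace the unconditional padding above.
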
Example #10
    def log_likelihood(self, xy):
        A, sigma, D = self.A, self.sigma, self.D_out
        x, y = xy[:, :-D], xy[:, -D:]

        if self.affine:
            A, b = A[:, :-1], A[:, -1]

        sigma_inv, L = inv_psd(sigma, return_chol=True)
        parammat = -1. / 2 * blockarray([
            [A.T.dot(sigma_inv).dot(A), -A.T.dot(sigma_inv)],
            [-sigma_inv.dot(A), sigma_inv]
        ])
        out = np.einsum('ni,ni->n', xy.dot(parammat), xy)
        out -= D / 2 * np.log(2 * np.pi) + np.log(np.diag(L)).sum()

        if self.affine:
            out += y.dot(sigma_inv).dot(b)
            out -= x.dot(A.T).dot(sigma_inv).dot(b)
            out -= 1. / 2 * b.dot(sigma_inv).dot(b)

        return out
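
For the non-affine case, the block matrix parammat is just a packaged form of the Gaussian exponent: [x, y] parammat [x, y]' equals -1/2 (y - A x)' sigma^{-1} (y - A x). The self-contained sketch below rebuilds that quadratic form with plain numpy (np.block standing in for blockarray) and checks it against scipy.stats.multivariate_normal; the shapes and values are made up.

    import numpy as np
    from scipy.stats import multivariate_normal

    rng = np.random.default_rng(3)
    D_in, D_out, N = 4, 2, 8
    A = rng.standard_normal((D_out, D_in))
    Q = rng.standard_normal((D_out, 2 * D_out))
    sigma = Q @ Q.T / (2 * D_out)

    x = rng.standard_normal((N, D_in))
    y = rng.standard_normal((N, D_out))
    xy = np.hstack((x, y))

    sigma_inv = np.linalg.inv(sigma)
    L = np.linalg.cholesky(sigma)
    parammat = -0.5 * np.block([
        [A.T @ sigma_inv @ A, -A.T @ sigma_inv],
        [-sigma_inv @ A, sigma_inv]])

    out = np.einsum('ni,ni->n', xy @ parammat, xy)
    out -= D_out / 2 * np.log(2 * np.pi) + np.log(np.diag(L)).sum()

    direct = multivariate_normal(np.zeros(D_out), sigma).logpdf(y - x @ A.T)
    assert np.allclose(out, direct)
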