Example 1
    def gp0(self, m, s):
        """
        Compute joint predictions for MGP with uncertain inputs.
        """
        assert hasattr(self, "hyp")
        if not hasattr(self, "K"):
            self.cache()

        x = np.atleast_2d(self.inputs)
        y = np.atleast_2d(self.targets)
        n, D = x.shape
        n, E = y.shape

        X = self.hyp
        iK = self.iK
        beta = self.alpha

        m = np.atleast_2d(m)
        inp = x - m

        # Compute the predicted mean and IO covariance.
        # Inverse lengthscale matrices, one per target dimension.
        iL = np.stack([np.diag(exp(-X[i, :D])) for i in range(E)])
        iN = np.matmul(inp, iL)
        B = iL @ s @ iL + np.eye(D)
        t = np.stack([solve(B[i].T, iN[i].T).T for i in range(E)])
        # q[i, j] = exp(-(x_j - m)' (Lambda_i + s)^{-1} (x_j - m) / 2)
        q = exp(-np.sum(iN * t, 2) / 2)
        qb = q * beta.T
        tiL = np.matmul(t, iL)
        # Signal variance scaled by the determinant correction |B|^{-1/2}.
        c = exp(2 * X[:, D]) / sqrt(det(B))

        M = np.sum(qb, 1) * c
        V = (np.transpose(tiL, [0, 2, 1]) @ np.expand_dims(qb, 2)).reshape(
            E, D).T * c
        # Log SE-kernel values between the training inputs and the test mean.
        k = 2 * X[:, D].reshape(E, 1) - np.sum(iN**2, 2) / 2

        # Compute the predicted covariance.
        inp = np.expand_dims(inp, 0) / np.expand_dims(exp(2 * X[:, :D]), 1)
        ii = np.repeat(inp[:, newaxis, :, :], E, 1)
        ij = np.repeat(inp[newaxis, :, :, :], E, 0)

        iL = np.stack([np.diag(exp(-2 * X[i, :D])) for i in range(E)])
        siL = np.expand_dims(iL, 0) + np.expand_dims(iL, 1)
        R = np.matmul(s, siL) + np.eye(D)
        t = 1 / sqrt(det(R))
        iRs = np.stack(
            [solve(R.reshape(-1, D, D)[i], s) for i in range(E * E)])
        iRs = iRs.reshape(E, E, D, D)
        Q = exp(k[:, newaxis, :, newaxis] + k[newaxis, :, newaxis, :] +
                maha(ii, -ij, iRs / 2))

        S = np.einsum('ji,iljk,kl->il', beta, Q, beta)
        tr = np.hstack([np.sum(Q[i, i] * iK[i]) for i in range(E)])
        S = (S - np.diag(tr)) * t + np.diag(exp(2 * X[:, D]))
        S = S - np.matmul(M[:, newaxis], M[newaxis, :])

        return M, S, V
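For intuition, here is a minimal stand-alone sketch (not part of the snippet above) that checks gp0's moment-matched predictive mean in the 1-D case against Monte Carlo; the names ell, sigf, and beta are illustrative assumptions, not identifiers from the original code.

import numpy as np

def exact_mean(x, beta, m, s, ell, sigf):
    # E[k(x_i, x*)] for x* ~ N(m, s) with an SE kernel: the lengthscale is
    # inflated by the input variance and a determinant factor appears.
    q = sigf**2 / np.sqrt(1 + s / ell**2) * np.exp(-(x - m)**2 / (2 * (ell**2 + s)))
    return q @ beta

rng = np.random.default_rng(0)
x = rng.normal(size=5)          # training inputs
beta = rng.normal(size=5)       # stands in for K^{-1} y
m, s, ell, sigf = 0.3, 0.5, 1.2, 0.8

xs = rng.normal(m, np.sqrt(s), size=200000)   # x* ~ N(m, s)
k = sigf**2 * np.exp(-(xs[:, None] - x[None, :])**2 / (2 * ell**2))
print(exact_mean(x, beta, m, s, ell, sigf), (k @ beta).mean())  # should agree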
Example 2
# Module-level aliases assumed by this snippet (plain NumPy suffices):
import numpy as np
import numpy.random as npr
from numpy import linalg

PI = np.pi


def gaussian_init(mu_in, var_in):
    # Return a log-density function and a sampler for a single Gaussian.
    mu, var = mu_in, var_in
    d = len(mu)
    if d == 1:

        def log_gaussian(x):
            log_p_const = -0.5 * np.log(2 * PI) - 0.5 * np.log(var)
            sub_mu = x - mu
            return log_p_const - 0.5 * sub_mu * sub_mu / var

        def generator(size):
            return npr.normal(mu, np.sqrt(var), size)
    else:
        var_det, var_inv = linalg.det(var), linalg.inv(var)
        log_p_const = -(d / 2.) * np.log(2 * PI) - 0.5 * np.log(var_det)

        def log_gaussian(x):
            sub_mu = x - mu
            #out = log_p_const - 0.5*np.sum(np.multiply(sub_mu,np.dot(var_inv,sub_mu.T).T ),1)
            out = log_p_const - 0.5 * np.sum(
                np.multiply(sub_mu, np.dot(sub_mu, var_inv.T)), 1)
            return out

        def generator(size):
            return npr.multivariate_normal(mu, var, size)

    return log_gaussian, generator
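A hedged usage sketch for gaussian_init (the values are arbitrary; it relies on the aliases defined at the top of the snippet):

mu = np.array([0., 1.])
var = np.array([[2., 0.3], [0.3, 1.]])
log_p, sample = gaussian_init(mu, var)
draws = sample(1000)          # (1000, 2) samples from N(mu, var)
print(log_p(draws[:5]))       # log-densities of the first five draws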
Example 3
    def gen_point_source_psf_image(pixel_grid, image, loc,
                                   psf_weights, psf_means, psf_covars):
        # use image PSF
        icovs = np.array([npla.inv(c) for c in psf_covars])
        dets  = np.array([npla.det(c) for c in psf_covars])
        chols = np.array([npla.cholesky(c) for c in psf_covars])

        return mog_like(pixel_grid, psf_means, icovs, dets, psf_weights)
Example 4
    def loss_sat(self, m, s):
        # Expected saturating cost 1 - exp(-(x - z)' W (x - z) / 2) under
        # x ~ N(m, s), together with its variance and an input-cost
        # covariance term.
        D = len(m)

        W = self.W if hasattr(self, 'W') else np.eye(D)
        z = self.z if hasattr(self, 'z') else np.zeros(D)
        m, z = np.atleast_2d(m), np.atleast_2d(z)

        sW = np.dot(s, W)
        ispW = solve((np.eye(D) + sW).T, W.T).T
        L = -exp(-(m - z) @ ispW @ (m - z).T / 2) / sqrt(det(np.eye(D) + sW))

        i2spW = solve((np.eye(D) + 2 * sW).T, W.T).T
        r2 = exp(-(m - z) @ i2spW @ (m - z).T) / sqrt(det(np.eye(D) + 2 * sW))
        S = r2 - L**2

        t = np.dot(W, z.T) - ispW @ (np.dot(sW, z.T) + m.T)
        C = L * t

        return L + 1, S, C
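A quick Monte Carlo check of the expected saturating cost in one dimension (a sketch; the constants are arbitrary):

import numpy as np

rng = np.random.default_rng(1)
m, s, z, w = 0.5, 0.4, 0.0, 2.0                # scalar mean, variance, target, weight
x = rng.normal(m, np.sqrt(s), size=500000)
mc = np.mean(1 - np.exp(-w * (x - z)**2 / 2))
exact = 1 - np.exp(-w * (m - z)**2 / (2 * (1 + s * w))) / np.sqrt(1 + s * w)
print(mc, exact)                               # should agree to ~3 decimals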
Example 5
    def update_params(self, means, covs, pis):
        assert covs.shape[1] == covs.shape[2] == self.D
        assert self.K == covs.shape[0] == len(pis), \
            "%d != %d != %d" % (self.K, covs.shape[0], len(pis))
        #assert np.isclose(np.sum(pis), 1.)
        self.means = means
        self.covs  = covs
        self.pis   = pis
        # Cache per-component determinants, inverses, and Cholesky factors.
        self.dets  = np.array([npla.det(c) for c in self.covs])
        self.icovs = np.array([npla.inv(c) for c in self.covs])
        self.chols = np.array([npla.cholesky(c) for c in self.covs])
Example 6
def logPostFA(W, params):
    X = params['data']
    N = X.shape[0]
    D, K = W.shape
    Q = np.dot(W, W.T) + np.eye(D) * .1
    # Gamma(a, b) prior on the entries of W (assumed positive).
    log_prior = (D * K * params['a'] * np.log(params['b'])
                 - D * K * np.log(gammaFn(params['a']))
                 + np.sum((params['a'] - 1) * np.log(W))
                 - np.sum(params['b'] * W))
    return (N / 2. * np.log(det(Q))
            - .5 * trace(np.dot(np.dot(X.T, X), inv(Q)))
            + log_prior)
Example 7
    def obj_common(lam, alpha):
        # 0th order
        ret_val = (log_likelihood(y, X, w_map, model, lam) +
                   log_prior(w_map, alpha))[0]

        # 2nd order
        #ret_val += 0.5*w_map.shape[0]*ln_two_pi # not related to optimization
        temp_mat2 = lam[0] * hesse_ses[0]
        for i in range(1, dimy):
            temp_mat2 += lam[i] * hesse_ses[i]
        det_hesse = LA.det(temp_mat2 + alpha[0] * np.eye(w_map.shape[0]) +
                           temp_diag)
        ret_val -= 0.5 * np.log(det_hesse)
        return -ret_val
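The per-output Hessian accumulation above can be written as a single contraction (a sketch, assuming hesse_ses stacks the dimy per-output Hessians along axis 0):

temp_mat2 = np.tensordot(lam[:dimy], hesse_ses[:dimy], axes=1)  # sum_i lam[i] * hesse_ses[i]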
Example 8
def kl_estimate(params,
                layer_sizes,
                n_data,
                N_samples,
                act='rbf',
                kernel='rbf',
                noise=1e-7):
    x = np.random.uniform(-10, 10, size=(n_data, 1))
    y = sample_bnn(params, x, N_samples, layer_sizes, act)  # [nf, nd]
    covariance = kernel_dict[kernel]
    cov = covariance(x, x) + noise * np.eye(x.shape[0])
    print(cov, y.shape, det(cov))
    log_gp = log_gp_prior(y, cov)
    #log_gp = mvn.logpdf(y, np.zeros(y.shape[1]), cov)
    return -entropy_estimate(y) - np.mean(log_gp)
Example 9
    def gen_prof_mog_params(image, loc,
                            gal_sig, gal_rho, gal_phi,
                            psf_weights, psf_means, psf_covars,
                            prof_amp, prof_sig):

        v_s = image.equa2pixel(loc)
        R = galaxies.gen_galaxy_transformation(gal_sig, gal_rho, gal_phi)
        W = np.dot(R, R.T)
        
        K_psf  = psf_weights.shape[0]
        K_prof = prof_amp.shape[0]

        # compute MOG components
        num_components = K_psf * K_prof
        weights = np.zeros(num_components, dtype=float)
        means   = np.zeros((num_components, 2), dtype=float)
        covars  = np.zeros((num_components, 2, 2), dtype=float)
        cnt     = 0
        for k in range(K_psf):                              # num PSF components
            for j in range(K_prof):                         # galaxy type components
                ## compute weights and component mean/variances
                weights[cnt] = psf_weights[k] * prof_amp[j]

                ## compute means
                means[cnt,0] = v_s[0] + psf_means[k, 0]
                means[cnt,1] = v_s[1] + psf_means[k, 1]

                ## compute covariance matrices
                for ii in range(2):
                    for jj in range(2):
                        covars[cnt, ii, jj] = psf_covars[k, ii, jj] + \
                                              prof_sig[j] * W[ii, jj]

                # increment index
                cnt += 1

        icovs = np.array([npla.inv(c) for c in covars])
        dets  = np.array([npla.det(c) for c in covars])
        chols = np.array([npla.cholesky(c) for c in covars])
        return means, covars, icovs, dets, chols, weights
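A vectorized equivalent of the double loop above (a sketch; it produces the same ordering, with the PSF index varying slowest):

weights = (psf_weights[:, None] * prof_amp[None, :]).ravel()
means = np.repeat(v_s[None, :] + psf_means, K_prof, axis=0)
covars = (psf_covars[:, None, :, :] +
          prof_sig[None, :, None, None] * W[None, None, :, :]).reshape(-1, 2, 2)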
Example 10
def approximate_log_marginal_likelihood(y, X, w_map, model, lam, alpha):
    # see supplementary manuscript for details
    log_ml = log_likelihood(y, X, w_map, model, lam) + log_prior(w_map, alpha)
    log_ml += 0.5 * w_map.shape[0] * ln_two_pi
    log_ml -= 0.5 * np.log(LA.det(hesse_w(y, X, w_map, model, lam, alpha)))
    return np.array(log_ml)
Example 11
    def forward(self, argument, device=None, output_to_retain=None):
        # Pass the input through and return its determinant alongside it.
        return argument, LA.det(argument)
Example 12
def log_wishart(lam, W, nu, d=2):
    # default dimensionality = 2
    lognum1 = ((nu - d - 1) / 2) * np.log(det(lam))
    lognum2 = -np.trace(np.dot(inv(W), lam)) / 2
    logden1 = (nu * d / 2) * np.log(2) + (nu / 2) * np.log(det(W))
    return lognum1 + lognum2 - logden1 - multigammaln(nu / 2, d)


def E_ln_lam_k(k, nu, W):
    return np.sum(digamma(nu[k] + 1 - np.arange(D) +
                          1)) + D * np.log(2) + np.log(det(W[k]))


def ln_B(W, nu):
    ln_num = -0.5 * nu * np.log(det(W))
    ln_det_1 = 0.5 * nu * D * np.log(2) - D * (D - 1) * 0.25 * np.log(np.pi)
    ln_det_2 = np.sum(gammaln(np.array([0.5 * (nu + 1 - i)
                                        for i in range(D)])))
    return ln_num - ln_det_1 - ln_det_2
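A sanity check of log_wishart against SciPy's parameterization (a sketch; requires scipy plus the inv/det/multigammaln imports the snippet assumes):

import numpy as np
from numpy.linalg import inv, det
from scipy.special import multigammaln
from scipy.stats import wishart

W = np.array([[1.0, 0.2], [0.2, 0.5]])
lam = np.array([[2.0, 0.1], [0.1, 1.5]])
print(log_wishart(lam, W, nu=5), wishart.logpdf(lam, df=5, scale=W))  # should match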
Example 15
def multivariate_gaussian_entropy(cov_sqrt):
    # H[N(mu, S)] = D/2 * log(2*pi*e) + 1/2 * log|S|, with S = cov_sqrt cov_sqrt^T.
    cov = np.dot(cov_sqrt, cov_sqrt.T)
    return 0.5 * cov.shape[0] * np.log(2 * np.pi * np.exp(1)) + 0.5 * np.log(det(cov))
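A quick consistency check against SciPy (a sketch; the square root is arbitrary):

import numpy as np
from numpy.linalg import det
from scipy.stats import multivariate_normal

L = np.array([[1.0, 0.0], [0.5, 2.0]])    # a covariance square root
print(multivariate_gaussian_entropy(L))
print(multivariate_normal(mean=np.zeros(2), cov=L @ L.T).entropy())  # should match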