def objective(paramslin, x, z, pij_flatten, pij0sum, run_time, taus, gamma, alpha, g0_params, precomp):
    params = util.unlinearise_params(paramslin, verbose=0)
    d, nz = z.shape
    nx = x.shape[1]
    kzzinv_m = precomp.Kzzinv @ params.m
    s = params.L @ params.L.T + __nugget(params.L.shape[1])
    eqn15sum = (params.m.T @ precomp.Kzzinv_psi_sum_Kzzinv @ params.m)[0, 0]

    eqn16a = np.trace(precomp.Kzzinv_psi_sum)
    eqn16b = np.trace(precomp.Kzzinv_psi_sum_Kzzinv @ s)
    eqn16sum = gamma * np.sum((run_time - x[0])**d) - eqn16a + eqn16b

    mutilde = (precomp.Kzzinv_kzx.T @ params.m).flatten()
    sigmaa = precomp.sigmas
    sigmab = np.sum(precomp.Kxz * precomp.Kzzinv_kzx.T, axis=1)
    sigmac = np.sum((params.L.T @ precomp.Kzzinv_kzx) ** 2, axis=0)
    sigmatilde = sigmaa - sigmab + sigmac
    eqn19a, eqn19b, eqn19c = expected_log_f2(mutilde, np.sqrt(sigmatilde))
    eqn19sum = -(eqn19c + eqn19a + eqn19b) @ pij_flatten

    kl_normal = kl_tril(params.L, params.m, precomp.Lzz, 0)
    kl_g = kl_gamma(params.scale, params.shape, g0_params['scale'], g0_params['shape'])

    total = kl_normal + kl_g + eqn15sum + eqn16sum + eqn19sum + \
            run_time * params.shape * params.scale - \
            pij0sum * (special.digamma(params.shape) + np.log(params.scale))
    return total
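`__nugget` is used above but not shown in this excerpt; a minimal sketch, assuming it only adds a small diagonal jitter to keep the covariance positive definite:

import numpy as np

def __nugget(n, eps=1e-6):
    # Hypothetical helper: diagonal jitter so params.L @ params.L.T stays well conditioned.
    return eps * np.eye(n)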
Example #2
 def __em_lb_likelihood(self, times, Y, expectations, **args):
      # 1) Set default values for optional arguments
     args.setdefault('Sigma_w', self.Sigma_w)
     args.setdefault('Sigma_y', self.Sigma_y)
     args.setdefault('Sigma_w_val', cov_mat_precomp(args['Sigma_w']))
     args.setdefault('Sigma_y_val', cov_mat_precomp(args['Sigma_y']))
     args.setdefault('mu_w', self.mu_w)
      # 2) Unpack values into local variables
     inv_sig_w = args['Sigma_w_val']['inv']
     log_det_sig_w = args['Sigma_w_val']['log_det']
     inv_sig_y = args['Sigma_y_val']['inv']
     log_det_sig_y = args['Sigma_y_val']['log_det']
     mu_w = args['mu_w']
     w_means = expectations['w_means']
     w_covs = expectations['w_covs']
     Phi = self.__get_Phi(times, **args)
     #3) Actually compute lower bound
     ans = 0.0
      for n in range(len(times)):
         Tn = len(times[n])
         lpw = log_det_sig_w + np.trace(np.dot(
             inv_sig_w, w_covs[n])) + quad(w_means[n] - mu_w, inv_sig_w)
         lhood = 0.0
          for t in range(Tn):
             phi_nt = Phi[n][t]
             y_nt = Y[n][t]
             lhood = lhood + log_det_sig_y + quad(y_nt-np.dot(phi_nt,w_means[n]),inv_sig_y) + \
                     np.trace(np.dot(inv_sig_y, np.dot(phi_nt, np.dot(w_covs[n], phi_nt.T))))
         ans = ans + lpw + lhood
     return -0.5 * ans
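`quad` is referenced but not defined in this excerpt; a plausible sketch, assuming it evaluates the quadratic form v^T M v:

import numpy as np

def quad(v, M):
    # Hypothetical helper: scalar quadratic form v.T @ M @ v.
    return np.dot(v, np.dot(M, v))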
Example #3
 def cost(self, mu_b, sigma_b, u, a):
     if a:
         return (mu_b - self._g).T @ np.diag(100. * self._bw) @ (mu_b - self._g) +\
                np.trace(np.diag(100. * self._vw) @ sigma_b)
     else:
         return u.T @ np.diag(self._uw) @ u + np.trace(
             np.diag(self._vw) @ sigma_b)
def log_marginal_likelihood(paramslin, x, z, pij, pij_flatten, pij0sum, run_time, taus, gamma, alpha, precomp):
    params = util.unlinearise_params(paramslin, verbose=0)
    d, nz = z.shape
    nx = x.shape[1]
    s = params.L @ params.L.T + __nugget(params.L.shape[1])
    eqn15sum = (params.m.T @ precomp.Kzzinv_psi_sum_Kzzinv @ params.m)[0, 0]

    eqn16a = np.trace(precomp.Kzzinv_psi_sum)
    eqn16b = np.trace(precomp.Kzzinv_psi_sum_Kzzinv @ s)
    eqn16sum = gamma * np.sum((run_time - x[0])**d) - eqn16a + eqn16b

    mutilde = (precomp.Kzzinv_kzx.T @ params.m).flatten()
    sigmaa = precomp.sigmas
    sigmab = np.sum(precomp.Kxz * precomp.Kzzinv_kzx.T, axis=1)
    sigmac = np.sum((params.L.T @ precomp.Kzzinv_kzx) ** 2, axis=0)
    sigmatilde = sigmaa - sigmab + sigmac
    eqn19a, eqn19b, eqn19c = expected_log_f2(mutilde, np.sqrt(sigmatilde))
    eqn19sum = -(eqn19c + eqn19a + eqn19b) @ pij_flatten

    ppij = pij[pij > 0]

    total = eqn15sum + eqn16sum + eqn19sum + run_time * params.shape * params.scale - \
            pij0sum * (special.digamma(params.shape) + np.log(params.scale)) + ppij @ np.log(ppij)

    return -total
Example #5
def fiml_auto(sigma_chol):

    sigma_chol = sigma_chol.reshape(n_predictors + 1, n_predictors + 1)
    sigma = np.dot(sigma_chol, sigma_chol.T)

    test_x, test_y = X[mask], Y[mask]
    mask_train = ((mask + 1) % 2).astype(bool)
    train_x, train_y = X[mask_train], Y[mask_train]
    missing = ((mask_var + 1) % 2).astype(bool)

    joint_test = np.concatenate([test_y, test_x], axis=1)
    samp_cov = np.cov(joint_test.T)

    joint_train = np.concatenate([train_y, train_x], axis=1)
    samp_cov_t = np.cov(joint_train[:, 0:n_causes + 1].T)

    L = -np.trace(np.dot(np.linalg.inv(sigma), samp_cov))
    L -= np.log(np.linalg.det(sigma))
    L *= test_x.shape[0]

    det_sub = np.linalg.det(sigma[0:n_causes + 1, 0:n_causes + 1])

    L_tr = -np.trace(
        np.dot(np.linalg.inv(sigma[0:n_causes + 1, 0:n_causes + 1]),
               samp_cov_t))
    L_tr -= np.log(det_sub)
    L_tr *= train_x.shape[0]

    return -(L + L_tr)
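The `fiml` variant in Example #7 below computes the same trace term with np.linalg.solve instead of an explicit inverse; the two agree, and solve avoids forming the inverse. A quick sanity check:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
sigma = A @ A.T + 4 * np.eye(4)      # symmetric positive definite
samp_cov = rng.standard_normal((4, 4))
assert np.allclose(np.trace(np.dot(np.linalg.inv(sigma), samp_cov)),
                   np.trace(np.linalg.solve(sigma, samp_cov)))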
Example #6
File: fca.py Project: zuoshifan/FCA
def free_kurtosis(X):
    """Compute free kurtosis k4 of a matrix X. This works for both self-adjoint and rectangular matrices."""
    N, M = X.shape
    # XXH = np.dot(X, X.T.conj())
    XXH = np.dot(X, np.conj(X.T))
    k4 = np.trace(np.dot(XXH, XXH)) / N - (1 + N / M) * (np.trace(XXH) / N)**2

    return k4
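A usage sketch: for a large i.i.d. Gaussian matrix with suitably scaled entries, the free kurtosis should be close to zero, which is what makes k4 a free-probability analogue of ordinary kurtosis:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 400)) / np.sqrt(400)
print(free_kurtosis(X))  # approaches 0 as the matrix grows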
Example #7
def fiml(sigma_chol, n_predictors, mask, mask_var,X,Y, sub,n_ul = 0, 
         print_val = False):
    """
    Implements full information maximum likelihood for optimization.
    """
    sigma_chol = sigma_chol.reshape(n_predictors+1,n_predictors+1)
    sigma = np.dot(sigma_chol, sigma_chol.T)

    test_x, test_y = X[mask], Y[mask]

    mask_train = ((mask+1)%2).astype(bool)
    if n_ul>0:
        mask_train[0:n_ul] = False
    train_x, train_y = X[mask_train], Y[mask_train]
    missing = ((mask_var+1)%2).astype(bool)

    joint_test = np.concatenate([test_y, test_x],axis=1)
    samp_cov = np.cov(joint_test.T)

    joint_train = np.concatenate([train_y, train_x], axis=1)
    samp_cov_t = np.cov(joint_train[:,sub].T)

    L = np.linalg.solve(sigma, samp_cov)
    L = -np.trace(L)
    L -= np.log(np.linalg.det(sigma))
    L *= test_x.shape[0]

    det_sub = np.linalg.det(sigma[sub].T[sub].T)

    L_tr = np.linalg.solve(sigma[sub].T[sub].T,samp_cov_t)
    L_tr = -np.trace(L_tr)
    L_tr -= np.log(det_sub)
    L_tr *= train_x.shape[0]

    if n_ul > 0:
        set_n = np.arange(1,n_predictors+1)
        joint_ul = X[0:n_ul,:]#np.concatenate([Y[0:n_ul,:], X[0:n_ul,1:]], axis=1)

        samp_cov_t = np.cov(joint_ul.T)
        mask_ul = np.copy(mask_train)
        mask_ul[0:n_ul] = True
        mask_ul[n_ul:] = False
        det_sub = np.linalg.det(sigma[1:,1:])

        L_ul = -np.trace(np.dot(np.linalg.inv(sigma[1:,1:]),samp_cov_t))
        L_ul -= np.log(det_sub)
        L_ul *= n_ul
    else:
        L_ul = 0

    if print_val:
        print(-(L + L_tr - L_ul))

    return -(L+L_tr-L_ul)
Example #8
    def elbo(y, phi, lam, pi, psi, sigma2s, mus, Sigmas, kernel_params):
        """
        phi [N, K] sample membership (cell line cluster)
        lam [G, L] feature membership (expression cluster)
        pi [K] sample mixture weight
        psi [L] feature mixture weights
        y[N, G, T] data
        mus [K, L, T] means
        """
        """
        conditional = np.array([list(map(
            lambda f, s: norm.logpdf(y, f, s).sum(axis=-1), Q[:, :-1], Q[:, -1]))
            for Q in np.concatenate([mus, sigma2s[:, :, np.newaxis]], 2)])

        conditional = conditional + np.log(mix)[:, :, np.newaxis, np.newaxis]
        assignments = np.einsum('nk, gl->klng', phi, lam)
        likelihood = np.sum(conditional * assignments)
        """

        likelihood = 0
        # data likelihood
        for l in range(L):
            for k in range(K):
                ll = np.sum(np.nan_to_num(norm.logpdf(
                    y, mus[k, l], np.sqrt(sigma2s[k, l]))), axis=-1)
                ll = ll - 0.5 * (np.trace(Sigmas[k, l] / sigma2s[k, l]))
                ll = ll * phi[:, k][:, np.newaxis]
                ll = ll * lam[:, l]
                likelihood = likelihood + np.sum(ll)

        # assignment likelihood
        likelihood = likelihood + np.sum(np.log(pi) * phi)
        likelihood = likelihood + np.sum(np.log(psi) * lam)

        # function likelihood
        for k in range(K):
            for l in range(L):
                Ker = cov_func(kernel_params[k, l], inputs, inputs)
                likelihood = likelihood \
                    + mvn.logpdf(mus[k, l], np.zeros(T), Ker) \
                    - 0.5 * np.trace(solve(Ker, Sigmas[k, l]))

        entropy = np.sum(list(map(multinomial_entropy, phi)) +
                         list(map(multinomial_entropy, lam)))
        for k in range(K):
            for l in range(L):
                entropy = entropy + mvn.entropy(mus[k, l], Sigmas[k, l])

        return likelihood + entropy
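`multinomial_entropy`, `cov_func`, and the globals (K, L, T, inputs) come from the surrounding project; a minimal sketch of the entropy helper, assuming each row of phi/lam is a categorical distribution:

import numpy as np

def multinomial_entropy(p, eps=1e-12):
    # Hypothetical helper: entropy of a categorical distribution p.
    p = np.asarray(p)
    return -np.sum(p * np.log(p + eps))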
Example #9
 def evaluate(self, x, u, stoch=True):
     ret = 0.
     _u = np.hstack((u.mu, np.zeros((self.dm_act, 1))))
     for t in range(self.nb_steps):
         ret += x.mu[..., t].T @ self.Cxx[..., t] @ x.mu[..., t] +\
                _u[..., t].T @ self.Cuu[..., t] @ _u[..., t] +\
                x.mu[..., t].T @ self.Cxu[..., t] @ _u[..., t] +\
                self.cx[..., t].T @ x.mu[..., t] +\
                self.cu[..., t].T @ _u[..., t] + self.c0[..., t]
         if stoch:
             # does not consider cross terms for now
             ret += np.trace(self.Cxx[..., t] @ x.sigma[..., t])
             if t < self.nb_steps - 1:
                 ret += np.trace(self.Cuu[..., t] @ u.sigma[..., t])
     return ret
Example #10
def test_trace_product():
    A = np.random.randn(100, 50, 10)
    B = np.random.randn(100, 10, 50)
    assert np.allclose(ssm.util.trace_product(A, B),
                       np.trace(A @ B, axis1=1, axis2=2))

    A = np.random.randn(50, 10)
    B = np.random.randn(10, 50)
    assert np.allclose(ssm.util.trace_product(A, B),
                       np.trace(A @ B))

    A = np.random.randn(1, 1)
    B = np.random.randn(1, 1)
    assert np.allclose(ssm.util.trace_product(A, B),
                       np.trace(A @ B))
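The test exercises `ssm.util.trace_product`; a minimal einsum sketch of what such a function can compute, tr(A @ B) vectorized over leading batch axes (an assumption about the implementation, not a copy of it):

import numpy as np

def trace_product(A, B):
    # tr(A @ B) = sum_ij A_ij * B_ji, batched over any leading axes.
    return np.einsum('...ij,...ji->...', A, B)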
Example #11
    def cost(self, controls, densities, system_step):
        """
        Args:
        controls :: ndarray - the control parameters for all time steps
        densities :: ndarray - an array of the initial densities evolved to
            the current time step
        system_step :: int - the system time step
        Returns:
        cost :: float - the penalty
        """
        cost = 0
        # Compute the fidelity for each evolution density and its forbidden densities.
        for i, density_forbidden_densities_dagger in enumerate(
                self.forbidden_densities_dagger):
            density = densities[i]
            density_cost = 0
            for forbidden_density_dagger in density_forbidden_densities_dagger:
                inner_product = (
                    anp.trace(anp.matmul(forbidden_density_dagger, density)) /
                    self.hilbert_size)
                density_cost = density_cost + anp.square(
                    anp.abs(inner_product))
            #ENDFOR
            cost = cost + anp.divide(density_cost,
                                     self.density_normalization_constants[i])
        #ENDFOR

        # Normalize the cost for the number of evolving densities
        # and the number of time evolution steps.
        cost = (cost / self.normalization_constant)

        return self.cost_multiplier * cost
Example #12
 def incremental_ll(theta, mu, z, m, C, V, eta_k, d, t):
     # m is the missing mask for y.
     M = np.diag(m)
     U = normnon(theta, mu, V, t) * M + eta_k * np.eye(d)
     Ui = np.diag(1.0 / np.diag(U))
     diff = z - M @ C @ self.nonlinearity(theta, mu, t)
     return 0.5 * np.trace(np.log(U)) + 0.5 * diff.T @ Ui @ diff
    def losswlog(K_conj):
        """
            K is a tensor of CONJUGATE Kraus Operators of dim s x y x x x x
            s: dim of features
            y: number of features
            x: number of labels
        """
        total_loss = 0.0

        # Iterate over each sequence in batch
        for i in range(labels.shape[0]):
            features = feats_matrix[i, :]
            label = labels[i] - 1

            # Compute likelihood of the label generating the given features
            conjKrausProduct = K_conj[features[0] - 1, 0, :, :]
            for s in range(1, features.shape[0]):
                conjKrausProduct = np.dot(K_conj[features[s] - 1, s, :, :],
                                          conjKrausProduct)

            eta = np.zeros([K_conj.shape[3], K_conj.shape[3]],
                           dtype='complex128')
            eta[label, label] = 1

            prod1 = np.dot(np.conjugate(conjKrausProduct), eta)
            prod2 = np.dot(prod1, conjKrausProduct.T)
            total_loss += np.log(np.real(np.trace(prod2)))

            # total_loss += np.real(np.trace(np.kron(np.conjugate(conjKrausProduct)[:, label], conjKrausProduct.T[:, label]).reshape(K_conj.shape[2], K_conj.shape[3])))

        return -total_loss / labels.shape[0]
def update_x_lp_cond_z(x, x2, z_update, zx, var_inv, logdet_var,
                       sigma_eps, sigma_a, K_approx, n, k):
    # likelihood p(X|Z)-- equation (8) in Griffiths and Ghahramani
    # http://mlg.eng.cam.ac.uk/zoubin/papers/ibp-nips05.pdf
    # outputs the likelihood at Z  when Z has component n,k flipped
    # var_inv refers to inv(Z^T * Z + sigma_eps/sigma_a I), the previous inverse
    x_d = np.shape(x)[1]
    x_n = np.shape(x)[0]
    k_approx = np.shape(z_update)[1]

    assert np.shape(x)[0] == np.shape(z_update)[0]

    inv_var_flip, logdet_var_flip= \
        update_inv_var(z_update, var_inv, logdet_var, sigma_eps, sigma_a, n, k)

    #mean_a = np.dot(np.dot(inv_var_flip, z_update.T), x)
    mean_a = np.dot(inv_var_flip, zx)

    #log_likelihood = -1/(2*sigma_eps) * \
    #        np.trace(x2 - np.dot(x.T, np.dot(z_update, mean_a)) )
    log_likelihood = -1 / (2 * sigma_eps) * \
            np.trace(x2 - np.dot(zx.T, mean_a))

    #[_, logconst] = np.linalg.slogdet(inv_var_flip)

    return log_likelihood - (x_d/2) * logdet_var_flip, inv_var_flip, logdet_var_flip
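`update_inv_var` is not shown; flipping one entry of Z changes Z^T Z by a low-rank term, so the inverse can be refreshed with the Sherman-Morrison identity instead of a full re-inversion. A generic sketch of that identity (an assumption about how the update works):

import numpy as np

def sherman_morrison(Minv, u, v):
    # Inverse of (M + outer(u, v)), given Minv = inv(M).
    Mu = Minv @ u
    vM = v @ Minv
    return Minv - np.outer(Mu, vM) / (1.0 + v @ Mu)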
    def cost(self, controls, densities, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        densities
        system_eval_step

        Returns:
        cost
        """
        # The cost is the infidelity of each evolved density and its target density.
        # NOTE: Autograd doesn't support vjps of anp.trace with axis arguments.
        # Nor does it support the vjp of anp.einsum(...ii->..., a).
        # Therefore, we must use a for loop to index the traces.
        # The following computations are equivalent to:
        # inner_products = (anp.trace(anp.matmul(self.target_densities_dagger, densities),
        #                             axis1=-1, axis2=-2) / self.hilbert_size)
        prods = anp.matmul(self.target_densities_dagger, densities)
        fidelity_sum = 0
        for i, prod in enumerate(prods):
            inner_prod = anp.trace(prod)
            fidelity = anp.abs(inner_prod)
            fidelity_sum = fidelity_sum + fidelity
        fidelity_normalized = fidelity_sum / (self.density_count * self.hilbert_size)
        infidelity = 1 - fidelity_normalized
        cost_normalized = infidelity / self.cost_eval_count

        return cost_normalized * self.cost_multiplier
Example #16
def test_laplacian(X, alpha, beta):
    psi = SimpleGaussian(alpha, beta)

    laplacian = psi.laplacian(X) * psi(X)
    assert_close(
        np.trace(hessian(psi_np)(X, alpha, beta).reshape(X.size, X.size)),
        laplacian)
Example #17
def extract_variance(cov, mode="avg"):
    """Function to extract the variance for weighting our relative pose
    estimate over a rigid-body transformation.

    Parameters:
        cov (np.array): A 3x3 matrix corresponding to a covariance matrix,
            for either the covariance of a set of translations, or over
            rotations given by a vector of Euler angles.
        mode (str):  Mode determining which variance value to extract.  Choices:
            {'avg', 'max', 'min'}.

    Returns:
        var (float):  Float value corresponding to the variance used for
            weighting our relative pose estimate.
    """
    if mode == "avg":  # Average diagonal elements
        return np.trace(cov) / cov.shape[0]

    elif mode == "max":  # Take maximum over diagonal elements (variance values)
        return max([cov[0, 0], cov[1, 1], cov[2, 2]])

    elif mode == "min":  # Take minimum over diagonal elements (variance values)
        return min([cov[0, 0], cov[1, 1], cov[2, 2]])

    else:  # No valid options selected
        print("Invalid mode specified.  Please choose from {avg, max}.")
Example #18
def mm_error(A, WtW):
    """ Compute the expected error of A on W, under the following assumptions:
            1. A is a sensitivity 1 strategy
            2. A supports W
    """
    AtA1 = np.linalg.pinv(np.dot(A.T, A))
    return np.trace(np.dot(AtA1, WtW))
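A small usage sketch: with the identity as the strategy, the expected error reduces to tr(W^T W):

import numpy as np

W = np.random.randn(5, 3)
WtW = np.dot(W.T, W)
A = np.eye(3)
assert np.allclose(mm_error(A, WtW), np.trace(WtW))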
Example #19
    def cost(self, controls, densities, system_step):
        """
        Args:
        controls :: ndarray (control_step_count x control_count)
            - the control parameters for all time steps
        densities :: ndarray (density_count x hilbert_size x hilbert_size)
            - an array of the densities evolved to
            the current time step
        system_step :: int - the system time step

        Returns:
        cost :: float - the penalty
        """
        hilbert_size = self.hilbert_size
        fidelity = 0
        for i, target_density_dagger in enumerate(
                self.target_densities_dagger):
            density = densities[i]
            inner_product = anp.trace(
                anp.matmul(target_density_dagger, density))
            inner_product_normalized = inner_product / hilbert_size
            fidelity = fidelity + anp.square(anp.abs(inner_product_normalized))
        infidelity = 1 - (fidelity / self.density_count)
        infidelity_normalized = infidelity / self.system_step_count
        return self.cost_multiplier * infidelity_normalized
Example #20
 def test_inner(self):
     X = self.man.rand()
     G = self.man.randvec(X)
     H = self.man.randvec(X)
     np_testing.assert_almost_equal(np.real(np.trace(np.conjugate(G.T)@H)),
                                    self.man.inner(X, G, H))
     assert np.isreal(self.man.inner(X, G, H))
Example #21
    def klm(self, pi):
        diff = pi.mu - self.mu

        kl = 0.5 * (np.trace(np.linalg.inv(pi.cov) @ self.cov) +
                    diff.T @ np.linalg.inv(pi.cov) @ diff - self.dm_act +
                    np.log(np.linalg.det(pi.cov) / np.linalg.det(self.cov)))
        return kl
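As a sanity check on this closed form, the KL divergence between a Gaussian and itself is zero; a standalone version of the same formula:

import numpy as np

def gauss_kl(mu0, cov0, mu1, cov1):
    # KL(N(mu0, cov0) || N(mu1, cov1)), the same closed form as klm above.
    d = mu0.shape[0]
    diff = mu1 - mu0
    inv1 = np.linalg.inv(cov1)
    return 0.5 * (np.trace(inv1 @ cov0) + diff.T @ inv1 @ diff - d +
                  np.log(np.linalg.det(cov1) / np.linalg.det(cov0)))

mu, cov = np.zeros(3), np.eye(3)
assert np.isclose(gauss_kl(mu, cov, mu, cov), 0.0)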
Example #22
def slogdet_jvp(g, ans, x):
    # Due to https://github.com/HIPS/autograd/issues/115
    # and https://github.com/HIPS/autograd/blob/65c21e/tests/test_numpy.py#L302
    # it does not seem easy to take the trace of the last two dimensions of
    # a multi-dimensional array at this time.
    if len(x.shape) > 2:
        raise ValueError('JVP is only supported for 2d input.')
    return 0, np.trace(np.linalg.solve(x.T, g.T))
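The returned tangent uses the identity d log|det X| = tr(X^{-1} dX); a finite-difference sketch checking it:

import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((4, 4)) + 6 * np.eye(4)
G = rng.standard_normal((4, 4))
eps = 1e-6
fd = (np.linalg.slogdet(X + eps * G)[1] - np.linalg.slogdet(X)[1]) / eps
assert np.isclose(fd, np.trace(np.linalg.solve(X, G)), atol=1e-4)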
Example #23
 def _KL_M_Projection(mu_pre, sigma_pre, mu_post, sigma_post):
     diff = mu_post - mu_pre
     inverse = np.linalg.inv(sigma_post)
     _, slog_post = np.linalg.slogdet(sigma_post)
     _, slog_pre = np.linalg.slogdet(sigma_pre)
     kl = 0.5 * (np.trace(inverse @ sigma_pre) + diff.T @ inverse @ diff +
                 slog_post - slog_pre - mu_pre.shape[0])
     return kl
Example #24
def anglerotation(R):
    """
    compute the angle of the rotation matrix R
    :param R: the rotation matrix
    :return: the angle of rotation in degrees
    """
    theta = np.rad2deg(np.arccos((np.trace(R) - 1) / 2))
    return theta
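For example, a 90-degree rotation about the z-axis has trace 1, so the formula (trace(R) = 1 + 2 cos theta) recovers 90:

import numpy as np

Rz = np.array([[0., -1., 0.],
               [1.,  0., 0.],
               [0.,  0., 1.]])
assert np.isclose(anglerotation(Rz), 90.0)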
def likelihood_UB(hyp):
    X = ModelInfo["X_batch"]
    y = ModelInfo["y_batch"]
    Z = ModelInfo["Z"]
    m = ModelInfo["m"]
    S = ModelInfo["S"]
    jitter = ModelInfo["jitter"]
    jitter_cov = ModelInfo["jitter_cov"]

    N = X.shape[0]
    M = Z.shape[0]

    logsigma_n = hyp[-1]
    sigma_n = np.exp(logsigma_n)

    # Compute K_u_inv
    K_u = kernel(Z, Z, hyp[:-1])
    K_u_inv = np.linalg.solve(K_u + np.eye(M) * jitter_cov, np.eye(M))
    #    L = np.linalg.cholesky(K_u  + np.eye(M)*jitter_cov)
    #    K_u_inv = np.linalg.solve(np.transpose(L), np.linalg.solve(L,np.eye(M)))

    ModelInfo.update({"K_u_inv": K_u_inv})

    # Compute mu
    psi = kernel(Z, X, hyp[:-1])
    K_u_inv_m = np.matmul(K_u_inv, m)
    MU = np.matmul(psi.T, K_u_inv_m)

    # Compute cov
    Alpha = np.matmul(K_u_inv, psi)
    COV = kernel(X, X, hyp[:-1]) - np.matmul(psi.T, np.matmul(K_u_inv,psi)) + \
            np.matmul(Alpha.T, np.matmul(S,Alpha))

    COV_inv = np.linalg.solve(COV + np.eye(N) * sigma_n + np.eye(N) * jitter,
                              np.eye(N))
    #    L = np.linalg.cholesky(COV  + np.eye(N)*sigma_n + np.eye(N)*jitter)
    #    COV_inv = np.linalg.solve(np.transpose(L), np.linalg.solve(L,np.eye(N)))

    # Compute cov(Z, X)
    cov_ZX = np.matmul(S, Alpha)

    # Update m and S
    alpha = np.matmul(COV_inv, cov_ZX.T)
    m = m + np.matmul(cov_ZX, np.matmul(COV_inv, y - MU))
    S = S - np.matmul(cov_ZX, alpha)

    ModelInfo.update({"m": m})
    ModelInfo.update({"S": S})

    # Compute NLML
    Beta = y - MU
    NLML_1 = np.matmul(Beta.T, Beta) / (2.0 * sigma_n * N)

    NLML_2 = np.trace(COV) / (2.0 * sigma_n)
    NLML_3 = N * logsigma_n / 2.0 + N * np.log(2.0 * np.pi) / 2.0
    NLML = NLML_1 + NLML_2 + NLML_3

    return NLML[0, 0]
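`kernel` and `ModelInfo` are project-specific; a plausible sketch of the kernel, assuming a squared-exponential form with log-hyperparameters [log signal variance, log lengthscale]:

import numpy as np

def kernel(Xa, Xb, hyp):
    # Hypothetical RBF kernel; hyp = [log signal variance, log lengthscale].
    sig2, ell = np.exp(hyp[0]), np.exp(hyp[1])
    d2 = (np.sum(Xa**2, axis=1)[:, None] + np.sum(Xb**2, axis=1)[None, :]
          - 2.0 * Xa @ Xb.T)
    return sig2 * np.exp(-0.5 * d2 / ell**2)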
Example #26
 def test_inner_product(self):
     X = self.manifold.random_point()
     G = self.manifold.random_tangent_vector(X)
     H = self.manifold.random_tangent_vector(X)
     np_testing.assert_almost_equal(
         np.real(np.trace(np.conjugate(G.T) @ H)),
         self.manifold.inner_product(X, G, H),
     )
     assert np.isreal(self.manifold.inner_product(X, G, H))
Example #27
def fnorm(matrix):
    """
    get the F norm.
    """
    U, S, V = np.linalg.svd(matrix)
    S = np.diag(S)
    fnorm_s = np.sqrt(np.trace(np.square(S)))

    return fnorm_s
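Since the Frobenius norm is the square root of the sum of squared singular values, the SVD route agrees with NumPy's built-in norm (which is cheaper):

import numpy as np

A = np.random.randn(4, 6)
assert np.isclose(fnorm(A), np.linalg.norm(A, 'fro'))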
Example #28
def test_laplacian(X, alpha):
    psi = SimpleGaussian(alpha)
    pool = SumPooling(psi)

    expected = np.trace(
        hessian(sum_pool_np)(X, alpha).reshape(X.size, X.size)) / sum_pool_np(
            X, alpha)

    assert_close(expected, pool.laplacian(X))
Example #29
 def loss(self, weights):
     """
     Y: onehot encoded
     """
     z = -self.X_ @ weights
     loss = (np.trace(self.X_ @ weights @ self.y_encoded.T) +
             np.sum(np.log(np.sum(np.exp(z), axis=1)))) / self.n_samples
     return loss
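The trace term works because tr(A @ B.T) = sum(A * B), so with one-hot labels it sums exactly one logit per sample; a quick check of the identity:

import numpy as np

A = np.random.randn(5, 3)
B = np.random.randn(5, 3)
assert np.isclose(np.trace(A @ B.T), np.sum(A * B))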
Example #30
        def cost_fn(x, return_fidelity=False):
            new_x = []
            pre = 0
            post = 0
            for i, system in enumerate(system_list):
                if systems_to_optimize[i]:
                    post = post + system.parameters["num_phases"]
                    new_x.append(x[pre:post])
                    pre = post  # advance the offset so successive slices are consecutive
            x = new_x

            for i, idx in enumerate(idxs):
                S_list[idx] = builds_list[idx](x[i])
                SH_list[idx] = np.conj(S_list[idx].T)

            cost = 0
            min_fid = 1
            for i, single_rho in enumerate(encoding.rhos_inputs):
                rho = np.copy(single_rho)

                for j, system in enumerate(system_list):
                    if len(system_list) > 1:

                        if system.system_type == "decoder" and j > 0:
                            pre_sys = system_list[j - 1]
                            rho_ta = pre_sys.encoding.lossless_to_targets(rho)
                            rho = system.apply_loss(rho=rho_ta, verbose=False)

                        rho = np.dot(np.dot(S_list[j], rho), SH_list[j])
                        rho = reset_ancillae(rho, system)
                    else:
                        rho = np.dot(np.dot(S_list[j], rho), SH_list[j])

                rho = system_list[-1].encoding.lossless_to_targets(rho)

                fidelity = np.abs(np.trace(np.dot(encoding.rhos_targets[i], rho)))
                if fidelity < min_fid:
                    min_fid = fidelity
                cost = cost + (1 - fidelity * np.abs(np.trace(rho)))

            if return_fidelity:
                return min_fid
            else:
                return cost / len(encoding.rhos_inputs)
Example #31
def get_trace_from_ws_and_Ks(eps, Ky, Kx, ws=None):
    Gy = center_K(Ky)
    Gx = center_K(Kx)
    N = len(Kx)
    if ws is None:
        ws = np.ones(N)
    W = np.diag(ws)
    ans = np.trace(np.dot(np.dot(W, Gy),
                          np.linalg.inv(np.dot(W, Gx + float(N) * eps * np.eye(N)))))
    return ans
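`center_K` is assumed to double-center the Gram matrix, H @ K @ H with H = I - 11^T/N, as is standard for HSIC-style trace statistics:

import numpy as np

def center_K(K):
    # Hypothetical helper: double-center a Gram matrix.
    N = len(K)
    H = np.eye(N) - np.ones((N, N)) / N
    return H @ K @ H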
Example #32
def KL_two_gaussians(params):
    d = np.shape(params)[0]-1
    mu = params[0:d,0]
    toSigma = params[0:d,1:d+1]
    intSigma = toSigma-np.diag(np.diag(toSigma))+np.diag(np.exp(np.diag(toSigma)))
    Sigma = intSigma-np.tril(intSigma)+np.transpose(np.triu(intSigma))
    muPrior = np.zeros(d)
    sigmaPrior = np.identity(d)
    return 0.5 * (np.log(np.linalg.det(Sigma) / np.linalg.det(sigmaPrior)) - d
                  + np.trace(np.dot(np.linalg.inv(Sigma), sigmaPrior))
                  + np.dot(np.transpose(mu - muPrior),
                           np.dot(np.linalg.inv(Sigma), mu - muPrior)))
Example #33
    def loss(weights):
        mu1 = parser.get(weights, 'mu1')
        mu2 = parser.get(weights, 'mu2')
        sig1 = parser.get(weights, 'sig1')*np.eye(mu1.size)
        sig2 = parser.get(weights, 'sig2')*np.eye(mu1.size)

        
        return 0.5 * (
            np.log(np.linalg.det(sig2) / np.linalg.det(sig1))
            - mu1.size
            + np.trace(np.dot(np.linalg.inv(sig2), sig1))
            + np.dot(np.dot(mu2 - mu1, np.linalg.inv(sig2)), np.transpose(mu2 - mu1))
        )
Example #34
def trance_quad(W, A): 
	return np.trace(np.dot(np.dot(np.transpose(W),A), W))
Example #35
 def fun(x): return np.trace(x)
 d_fun = lambda x : to_scalar(grad(fun)(x))
Example #36
 def fun(x): return to_scalar(np.trace(x, offset=offset))
 d_fun = lambda x : to_scalar(grad(fun)(x))
Example #37
def KL_two_gaussians(params):
    mu = params[0:len(params) // 2]
    Sigma = np.diag(np.exp(params[len(params) // 2:]))
    muPrior = np.zeros(d)
    sigmaPrior = np.identity(d)
    return 0.5 * (np.log(np.linalg.det(Sigma) / np.linalg.det(sigmaPrior)) - d
                  + np.trace(np.dot(np.linalg.inv(Sigma), sigmaPrior))
                  + np.dot(np.transpose(mu - muPrior),
                           np.dot(np.linalg.inv(Sigma), mu - muPrior)))
	K_unc = calcSigma(np.array(unc_mean).reshape((-1,1)),x_train,length_scale).flatten() # Gather gaussian kernel of how input mean relates to training inputs
	K_unc_adj = calcSigma([unc_mean],x_train,-1.0*np.sqrt((length_scale*(length_scale+unc_var))/unc_var)) # Generate adjusted kernel-like distribution

	ll = ((1+(unc_var/length_scale))**(-0.5)*K_unc*K_unc_adj).T # Calculate l, the adjusted Kernel

	L = np.zeros((n_train,n_train))
	for ii in range(n_train):
		for jj in range(n_train):
			xd = np.mean([x_train[ii],x_train[jj]])
			L[ii,jj] = K_unc[ii]*K_unc[jj]*(1+2*(unc_var/length_scale))**(-0.5) * np.exp(0.5*(unc_var/((0.5*length_scale + unc_var)*(0.5*length_scale)))*(unc_mean-xd)**2)

	# Generate posterior mean and variance
	post_unc_mean = (Beta.T.dot(ll)).flatten()[0]

	post_unc_var = (1 - np.trace( np.dot(Omg,L)) ) + ( np.trace( np.dot( np.dot(Beta,Beta.T), L-np.dot(ll,ll.T) ) ) )

	# Plot posterior output distribution
	exact_unc_xs = np.linspace(-4,4,10*n_pts)
	plt.figure(figsize=(16,18))
	plt.subplot(2,2,1)
	plt.plot(plotGaussian(exact_unc_xs,post_unc_mean,post_unc_var),exact_unc_xs,'g-',lw=3)
	# plt.ylim([-4, 4])
	plt.title("Posterior Output Distribution--Exact Method")
	plt.ylabel("output, f(x)")
	plt.subplot(2,2,2)
	plt.plot(full_x,post_sampled_values[:,0],full_x,post_sampled_values[:,1],full_x,post_sampled_values[:,2])
	plt.fill_between(full_x.flatten(),f_post_lower,f_post_upper,color='0.15',alpha=0.25)
	plt.plot(full_x,true_func(full_x),'k-',lw=3,label='True Function',alpha=0.35) # Plot the underlying function generating the data
	plt.plot(x_train,y_train,'r*',markersize=10,label='Training Input',alpha=0.35) 
	plt.ylim([-4,4])