Example #1
    def g():
        d = len(Mu)
        assert Mu.shape == (d,), "Mu must be a vector"
        assert A.shape == (d, d), "A must be a square matrix"
        assert (A.T == A).all(), "and symmetric"
        assert V.shape == (d, d), "V must be a square matrix"
        assert (V.T == V).all(), "and symmetric"

        a = chol(A)
        v = chol(V)

        B = dot(V, inv(V + A))
        _a2 = V - dot(B, V)
        _a2 = chol(_a2)

        Y, U = array([0.0] * d), array([0.0] * d)

        for i in range(n + burnin):
            for _ in range(thin):  # thinning: keep only every thin-th draw
                # sample Y | U ~ N(U, V)
                Y = U + dot(v, random.normal(size=d))

                # sample U | Y ~ N(A(A+V)^-1(Y-Mu) + Mu,
                #                  A - A(A+V)^-1A)
                U = dot(B, (Mu - Y)) + Y + dot(_a2, random.normal(size=d))

            if i >= burnin:
                yield [U, Y]
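A minimal usage sketch for the generator above, assuming it is the body of an enclosing function with a signature like gibbs_hierarchical_normal(n, V, Mu, A, burnin, thin) (a hypothetical name, mirroring direct_hierarchical_normal in a later example) that returns g():

import numpy as np

Mu, A, V = np.zeros(2), np.eye(2), 0.5 * np.eye(2)
# gibbs_hierarchical_normal is the assumed wrapper, not shown in the snippet
draws = list(gibbs_hierarchical_normal(1000, V, Mu, A, burnin=100, thin=10))
U_draws = np.array([u for u, y in draws])
print(U_draws.mean(axis=0))  # should be close to Mu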
Example #2
    def g():

        d = len(Mu)  # dimensionality...
        assert Mu.shape == (d,), "Mu must be a vector"
        assert Sigma.shape == (d, d), "Sigma must be a square matrix"
        assert (Sigma.T == Sigma).all(), "and symmetric"
        assert 0 <= j < d

        Mu_1, Mu_2 = Mu[:j], Mu[j:]
        Sigma_11, Sigma_12, Sigma_22 = Sigma[:j, :j], Sigma[:j, j:], Sigma[j:, j:]

        Y = array([0.0] * d)  # the initial value of Y is arbitrary; only its size matters, so start at 0

        for i in range(n + burnin):
            for _ in range(thin):  # thinning: keep only every thin-th draw
                # TODO: precompute factors
                # also, this could be sped up by forcing Mu = 0 during the Gibbs
                # sampling (avoiding the subtraction at each step) and adding Mu
                # back in a single vectorized step at the end (yield Y + Mu)

                if j > 0:
                    Y[:j] = (
                        Mu_1
                        + dot(dot(Sigma_12, inv(Sigma_22)), Y[j:] - Mu_2)
                        + dot(chol(Sigma_11 - dot(dot(Sigma_12, inv(Sigma_22)), Sigma_12.T)), random.normal(size=j))
                    )
                if j < d:
                    Y[j:] = (
                        Mu_2
                        + dot(dot(Sigma_12.T, inv(Sigma_11)), Y[:j] - Mu_1)
                        + dot(
                            chol(Sigma_22 - dot(dot(Sigma_12.T, inv(Sigma_11)), Sigma_12)), random.normal(size=(d - j))
                        )
                    )
            if i >= burnin:
                yield array(Y)
Example #3
    def rvs(self):
        """Draw one sample. See p.582 of Gelman."""
        x = stats.wishart(df=self.ν).rvs()
        z = stats.multivariate_normal(mean=np.zeros(self.D),
                                      cov=np.eye(self.D)).rvs()
        A = chol(inv(self.Λ))
        return self.μ + A @ z * sqrt(self.ν / x)
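This is the standard construction of a multivariate Student-t draw: a Gaussian scaled by sqrt(ν/χ²_ν). A sketch of the same draw using SciPy's built-in distribution (SciPy ≥ 1.6), assuming inv(Λ) plays the role of the shape matrix as in the method above:

import numpy as np
from numpy.linalg import inv
from scipy import stats

nu, mu, Lam = 5.0, np.zeros(3), np.eye(3)
# one draw from the same multivariate t as the hand-rolled version above
sample = stats.multivariate_t(loc=mu, shape=inv(Lam), df=nu).rvs()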
Example #4
    def post(self, hyp, covfunc, X, y):
        """ Generic function to compute posterior distribution.
        """

        if len(hyp.shape) > 1:  # force 1d hyperparameter array
            hyp = hyp.flatten()

        if len(X.shape) == 1:
            X = X[:, np.newaxis]
        self.N, self.D = X.shape

        # hyperparameters
        sn2 = np.exp(2 * hyp[0])  # noise variance
        if self.warp is not None:  # parameters for warping the likelihood
            n_lik_param = self.n_warp_param + 1
        else:
            n_lik_param = 1
        theta = hyp[n_lik_param:]  # (generic) covariance hyperparameters

        if self.verbose:
            print("estimating posterior ... | hyp=", hyp)

        self.K = covfunc.cov(theta, X)
        self.L = chol(self.K + sn2 * np.eye(self.N))
        self.alpha = solve(self.L.T, solve(self.L, y))
        self.hyp = hyp
        self.covfunc = covfunc
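The nested solves compute alpha = (K + sn2*I)^{-1} y through the Cholesky factor. A sketch of the same step using SciPy's dedicated triangular routines (an alternative, not necessarily what this codebase imports):

import numpy as np
from scipy.linalg import cho_factor, cho_solve

def chol_solve_alpha(K, sn2, y):
    """alpha = (K + sn2*I)^{-1} y via a Cholesky factorization."""
    c, low = cho_factor(K + sn2 * np.eye(K.shape[0]))
    return cho_solve((c, low), y)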
Example #5
def choldelete(R, i_del):
    """Remove row/column i_del from a factorization A = R.T @ R (R upper-triangular)."""
    rows = R.shape[0]
    S0 = np.array([R[i_del, (i_del + 1):]])      # deleted row, right of the diagonal
    S1 = R[(i_del + 1):, (i_del + 1):]           # lower-right block
    R = np.delete(R, i_del, 1)                   # drop column i_del
    R = np.delete(R, np.arange(i_del, rows), 0)  # keep only the rows above i_del
    # re-factor the lower-right block, folding the deleted row back in
    S = chol(S1.T.dot(S1) + S0.T.dot(S0)).T
    return np.r_[R, np.c_[np.zeros((rows - i_del - 1, i_del)), S]]
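A quick consistency check for choldelete (a sketch, assuming chol is NumPy's lower-triangular cholesky, so chol(A).T is the upper factor with R.T @ R == A):

import numpy as np
from numpy.linalg import cholesky as chol

A = np.cov(np.random.randn(6, 60))           # random SPD matrix
R = chol(A).T                                 # upper factor: R.T @ R == A
i = 2
R_del = choldelete(R, i)
A_del = np.delete(np.delete(A, i, 0), i, 1)   # A with row/column i removed
print(np.allclose(R_del.T @ R_del, A_del))    # expect True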
Example #6
def CovarianceMaatrixAdaptionEvolutionStrategyF(task, epsilon=1e-20, rnd=rand):
    lam = (4 + np.round(3 * np.log(task.D))) * 10
    alpha_mu, hs, sigma0 = 2, 0, 0.3 * task.range()
    mu = int(np.round(lam / 2))
    w = np.log(mu + 0.5) - np.log(range(1, mu + 1))
    w = w / np.sum(w)
    mueff = 1 / np.sum(w ** 2)
    cs = (mueff + 2) / (task.D + mueff + 5)
    ds = 1 + cs + 2 * max(np.sqrt((mueff - 1) / (task.D + 1)) - 1, 0)
    ENN = np.sqrt(task.D) * (1 - 1 / (4 * task.D) + 1 / (21 * task.D ** 2))
    cc = (4 + mueff / task.D) / (4 + task.D + 2 * mueff / task.D)
    c1 = 2 / ((task.D + 1.3) ** 2 + mueff)
    cmu = min(1 - c1,
              alpha_mu * (mueff - 2 + 1 / mueff) / ((task.D + 2) ** 2 + alpha_mu * mueff / 2))
    hth = (1.4 + 2 / (task.D + 1)) * ENN
    ps, pc, M = np.full(task.D, 0.0), np.full(task.D, 0.0), np.full(task.D, 0.0)
    C, sigma = np.eye(task.D), sigma0
    x = rnd.uniform(task.lower(), task.upper())
    x_f = task.eval(x)
    while not task.stop_cond_i():
        pop_step = np.asarray([
            rnd.multivariate_normal(np.full(task.D, 0.0), C)
            for _ in range(int(lam))
        ])
        pop = np.asarray([task.repair(x + sigma * ps, rnd) for ps in pop_step])
        pop_f = np.apply_along_axis(task.eval, 1, pop)
        isort = np.argsort(pop_f)
        pop, pop_f, pop_step = pop[isort[:mu]], pop_f[isort[:mu]], pop_step[isort[:mu]]
        if pop_f[0] < x_f: x, x_f = pop[0], pop_f[0]
        M = np.sum(w * pop_step.T, axis=1)
        ps = solve(chol(C).conj() + epsilon,
                   ((1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * M + epsilon).T)[0].T
        sigma *= np.exp(cs / ds * (norm(ps) / ENN - 1))**0.3
        ifix = np.where(sigma == np.inf)
        if np.any(ifix): sigma[ifix] = sigma0
        if norm(ps) / np.sqrt(1 - (1 - cs)**(2 * (task.Iters + 1))) < hth:
            hs = 1
        else:
            hs = 0
        delta = (1 - hs) * cc * (2 - cc)
        pc = (1 - cc) * pc + hs * np.sqrt(cc * (2 - cc) * mueff) * M
        C = (1 - c1 - cmu) * C + c1 * (np.outer(pc, pc) + delta * C)
        for i in range(mu):
            C += cmu * w[i] * np.outer(pop_step[i], pop_step[i])
        E, V = eig(C)
        if np.any(E < epsilon):
            E = np.fmax(E, 0)
            C = lstsq(V.T, np.dot(V, np.diag(E)).T, rcond=None)[0].T
    return x, x_f
Example #7
    def __init__(self, mu, lmbda, kappa, nu):
        super(NormalInverseWishart, self).__init__()

        self.mu = mu
        self.lmbda = lmbda
        self.nu = nu
        self.kappa = kappa
        self.lmbda_chol = chol(lmbda)
        self.d = mu.shape[0]
Example #8
def direct_hierarchical_normal(n, V, Mu, A):
    """
	sample
		(y1,y2) such that
		y1 ~ N(Mu, A)
		y2|y1 ~ N(y1, V)
	directly from the definition
	(for comparison)
	"""
    a = chol(A)
    v = chol(V)
    d = len(Mu)

    def model():
        z1, z2 = random.normal(size=d), random.normal(size=d)
        y1 = Mu + dot(a, z1)
        y2 = y1 + dot(v, z2)
        return y1, y2

    return array([model() for _ in range(n)])
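A short usage sketch (assuming the numpy names array, dot, random, and chol used above are in scope):

import numpy as np

Mu, A, V = np.zeros(2), np.eye(2), 0.5 * np.eye(2)
draws = direct_hierarchical_normal(1000, V, Mu, A)   # shape (1000, 2, 2)
y1, y2 = draws[:, 0], draws[:, 1]
print(y1.mean(axis=0))   # close to Mu
print(np.cov(y2.T))      # close to A + V, the marginal covariance of y2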
Example #9
def stouffer_liptak(pvals, sigma):
    qvals = norm.isf(pvals).reshape(len(pvals), 1)
    try:
        C = np.asmatrix(chol(sigma)).I
    except np.linalg.LinAlgError:
        # for non positive definite matrix default to z-score correction.
        z, L = np.mean(norm.isf(pvals)), len(pvals)
        sz = 1.0 / L * np.sqrt(L + 2 * np.tril(sigma, k=-1).sum())
        return norm.sf(z / sz)

    qvals = C * qvals
    Cp = qvals.sum() / np.sqrt(len(qvals))
    return norm.sf(Cp)
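Why multiplying by the inverse Cholesky factor works: if the probit scores q are N(0, sigma) and C satisfies C @ C.T == sigma, then C^{-1} q is N(0, I), so the sum of the decorrelated scores divided by sqrt(L) is standard normal under the null. A quick Monte-Carlo sketch of that fact:

import numpy as np

rng = np.random.default_rng(0)
sigma = np.array([[1.0, 0.6], [0.6, 1.0]])
C = np.linalg.cholesky(sigma)             # C @ C.T == sigma
q = rng.multivariate_normal(np.zeros(2), sigma, size=100_000)
q_star = np.linalg.solve(C, q.T)          # decorrelated scores
print(np.cov(q_star))                     # close to the identity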
Example #10
def sigmas(x, P, cc):
    """ Sigma points around reference point

    :param x:   reference point (a column vector)
    :param P:   covariance
    :param cc:  scaling coefficient
    :return:    sigma points
    """
    A = cc * chol(P).T
    Y = x[:, zeros(x.size, int)]  # replicate the column x into x.size columns
    X = np.hstack((x, Y + A, Y - A))

    return X
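A usage sketch (assuming zeros and chol in the function body are numpy.zeros and a Cholesky factorization; x must be a column vector):

import numpy as np

x = np.array([[1.0], [2.0]])            # 2 x 1 reference point
P = np.array([[1.0, 0.2], [0.2, 2.0]])  # covariance
X = sigmas(x, P, np.sqrt(3.0))
print(X.shape)                          # (2, 5): x plus 2*2 symmetric points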
Example #11
def stouffer_liptak(pvals, sigma=None):
    """
    The stouffer_liptak correction.
    >>> stouffer_liptak([0.1, 0.2, 0.8, 0.12, 0.011])
    {'p': 0.0168..., 'C': 2.1228..., 'OK': True}

    >>> stouffer_liptak([0.5, 0.5, 0.5, 0.5, 0.5])
    {'p': 0.5, 'C': 0.0, 'OK': True}

    >>> stouffer_liptak([0.5, 0.1, 0.5, 0.5, 0.5])
    {'p': 0.28..., 'C': 0.57..., 'OK': True}

    >>> stouffer_liptak([0.5, 0.1, 0.1, 0.1, 0.5])
    {'p': 0.042..., 'C': 1.719..., 'OK': True}

    >>> stouffer_liptak([0.5], np.matrix([[1]]))
    {'p': 0.5...}
    """
    L = len(pvals)
    pvals = np.array(pvals, dtype=np.float64)
    pvals[pvals == 1] = 1.0 - 9e-16
    qvals = norm.isf(pvals, loc=0, scale=1).reshape(L, 1)
    if any(np.isinf(qvals)):
        raise Exception("bad values: %s" % pvals[np.isinf(qvals).ravel()])

    # don't do the correction unless sigma is specified.
    result = {"OK": True}
    if sigma is not None:
        try:
            C = chol(sigma)
            Cm1 = np.asmatrix(C).I # C^-1
            # qstar
            qvals = Cm1 * qvals
        except LinAlgError as e:
            result["OK"] = False
            result = z_score_combine(pvals, sigma)
            return result

    Cp = qvals.sum() / np.sqrt(len(qvals))
    # get the right tail.
    pstar = norm.sf(Cp)
    if np.isnan(pstar):
        print("BAD:", pvals, sigma, file=sys.stderr)
        pstar = np.median(pvals)
        result["OK"] = True
    result.update({"C": Cp, "p": pstar})
    return result
Example #12
def gpr_mcmc(x, y, iters, xmin, xmax, gamma):
    xx = np.hstack((x, np.linspace(xmin, xmax, 100)))
    M = len(x)
    N = len(xx)
    K = kernel_matrix(xx, kgauss(1, 1))
    Kinv = inv(K[0:M, 0:M])
    S = chol(K)
    f = np.dot(S, randn(N))
    g = np.zeros(len(xx))
    for it in range(iters):
        f, lik = elliptical(f, S, gpr_cauchy, (x, y, gamma, Kinv))
        g = g + f
        print('\r[iter %2d]' % (it + 1))
        plot(xx[M:], f[M:])  # color='gray')
    print('')
    plot(x, y, 'bx', markersize=14)
    plot(xx[M:], g[M:] / iters, 'k', linewidth=3)
Example #13
def mvnpdf(data, means, covs):
    '''
    Compute multivariate normal log pdf

    Parameters
    ----------
    data : ndarray (n x k)
    means : sequence of mean vectors (length j)
    covs : sequence of k x k covariance matrices (length j)

    Returns
    -------
    densities : ndarray (n x j)
    '''
    logdets = [np.log(np.linalg.det(c)) for c in covs]
    ichol_sigmas = [inv(chol(c)) for c in covs]

    packed_params = util.pack_params(means, ichol_sigmas, logdets)
    packed_data = util.pad_data(data)
    return testmod.mvn_call(packed_data, packed_params, data.shape[1])
Example #14
def mvnpdf_multi(data, means, covs, weights=None, logged=True,
                 get=True, order="F", datadim=None):
    """
    Multivariate normal density with multiple sets of parameters

    Parameters
    ----------
    data : ndarray (n x k)
    covs : sequence of 2d k x k matrices (length j)
    weights : ndarray (length j)
        Multiplier for component j, usually will sum to 1

    get = False leaves the result on the GPU
    without copying back.

    If data has already been padded, the original dimension
    must be passed in datadim

    If data is of GPUarray type, it is assumed to be
    padded, and datadim will need to be passed if padding
    was needed.

    Returns
    -------
    densities : n x j
    """
    if logged:
        cu_func = cu_module.get_function('log_pdf_mvnormal')
    else:
        cu_func = cu_module.get_function('pdf_mvnormal')

    assert(len(covs) == len(means))

    ichol_sigmas = [linalg.inv(chol(c)) for c in covs]
    logdets = [-2.0*np.log(c.diagonal()).sum() for c in ichol_sigmas]

    if weights is None:
        weights = np.ones(len(means))

    packed_params = _pack_mvnpdf_params(means, ichol_sigmas, logdets, weights)

    return _multivariate_pdf_call(cu_func, data, packed_params,
                                  get, order, datadim)
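The logdets line relies on the identity log det(Sigma) = -2 * sum(log(diag(T))) when T is the inverse of a triangular Cholesky factor of Sigma. A quick NumPy check:

import numpy as np

sigma = np.cov(np.random.randn(4, 40))
T = np.linalg.inv(np.linalg.cholesky(sigma))
print(np.isclose(-2.0 * np.log(T.diagonal()).sum(),
                 np.log(np.linalg.det(sigma))))   # expect True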
Example #15
def mixture_loglike(data, thetas, covs, labels):

    n = len(data)
    likes = pdfs.mvnpdf(data, thetas, covs)
    loglike = likes.ravel('F').take(labels * n + np.arange(n)).sum()

    if np.isnan(loglike):
        return -1e300

    return loglike

    # NOTE: the block below is unreachable (it follows the return above); it
    # sketches a per-component fallback for when pdfs.mvnpdf produces NaNs.
    if np.isnan(likes).any():
        loglike = 0.
        for j, (theta, cov) in enumerate(zip(thetas, covs)):
            this_data = data[labels == j]
            ch = chol(cov)
            loglike += pm.mv_normal_chol_like(this_data, theta, ch)

        return loglike
Example #16
    def calculate_global_params(self):
        """
        Calculate the global parameters (xbar, subject variances)
        :return: None
        """

        # Mean map
        self.xbar = np.mean(np.nanmean(self.real_maps, axis=1), axis=0)
        self.real_maps = self.real_maps - self.xbar

        # Variance array
        subject_var = []
        for subject in self.real_maps:
            subject_var.append(
                np.nanmean((np.nanmean(subject, axis=0) - self.xbar)**2))
        self.var_s = np.array(subject_var)

        # Covariance Matrix for generating subject components

        # Generate distance matrix
        print("Cov-mat")
        lst = [(i, j) for i in range(128) for j in range(128)]
        dist_matrix = cdist(lst, lst)

        # Calculated parameters from the curve fitting (distance vs covariance)
        a = 0.12275941
        b = 0.10306618
        c = 0.64741083
        """
        # Median
        a = 0.11836497
        b = 0.33571855
        c = 0.42886912
        """

        ypred = self.exponential(dist_matrix, a, b, c)
        self.cov_mat = ypred.reshape(16384, 16384)
        self.chol_mat = chol(self.cov_mat)
        del dist_matrix, ypred
Example #17
def chol_inv(A):
    """
        CHOLINV
    
    [Description]
    Returns the inverse of a matrix by using Cholesky decomposition.
    
    [Input]
    A  : input array (D x D).
    
    [Output]
    Ai : Inverse of A (D x D).
    Ri : Inverted Cholesky factor.
    
    References:
        (N/A)
    
    Copyright (c) Michail D. Vrettas, PhD - November 2015.
    
    Last Updated: November 2015.
    """

    # Check if input is empty.
    if (A is None):
        print("Input matrix is None!")
        return None, None

    # Check if input is scalar.
    if np.isscalar(A):
        return 1.0 / A, 1.0 / np.sqrt(A)
    else:
        # If the input is vector.
        if (len(A.shape) == 1):
            # Transform it to diagonal matrix
            A = np.diag(A)

    # Try Cholesky decomposition. NB: this assumes chol returns the *upper*
    # factor R with A = R.T @ R (e.g. scipy.linalg.cholesky's default), so
    # that Ri @ Ri.T equals inv(A).
    Ri = inv(chol(A))
    Ai = Ri.dot(Ri.T)

    # --->
    return Ai, Ri
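A quick check of the identity (a sketch, assuming chol is scipy.linalg.cholesky, whose default returns the upper factor):

import numpy as np
from numpy.linalg import inv
from scipy.linalg import cholesky as chol

A = np.cov(np.random.randn(4, 40))
Ai, Ri = chol_inv(A)
print(np.allclose(Ai @ A, np.eye(4)))   # expect True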
Example #18
def gen_correlated(sigma, n, observed=None):
    """
    generate autocorrelated data according to the matrix
    sigma. if X is None, then data will be sampled from
    the uniform distibution. Otherwise, it will be sampled
    from X. Where X is then *all* observed
    p-values.
    """
    C = np.matrix(chol(sigma))
    if observed is None:
        X = np.random.uniform(0, 1, size=(n, sigma.shape[0]))
    else:
        assert n * sigma.shape[0] < observed.shape[0]
        idxs = np.random.randint(0, len(observed),
                                 size=sigma.shape[0] * n)
        X = observed[idxs].reshape((n, sigma.shape[0]))

    Q = np.matrix(qnorm(X))
    for row in np.array(1 - norm.sf((Q * C).T)).T:
        yield row
Example #19
    def L(self, X, y, grad=False):
        """ Computes the log likelihood and returns the gradient w.r.t. ls, amp,
        and noise if desired.
        """
        # Set up cache
        cache = {}
        K = self.K(X)
        Ki = np.linalg.inv(K)
        Kiy = Ki.dot(y)
        norm = len(X)
        cache['K'] = K
        cache['Ki'] = Ki
        cache['Kiy'] = Kiy
        cache['norm'] = norm
        # Compute L and dL
        L = -0.5 * y.T.dot(Kiy).sum() - np.log(np.diag(chol(K))).sum()
        L -= len(X) / 2 * np.log(2 * np.pi)
        if grad:
            return L / norm, self.L_grad(X, y, cache)
        else:
            return L / norm
Example #20
def stouffer_liptak(pvals, sigma=None, correction=False):
    """
    The stouffer_liptak correction.
    >>> stouffer_liptak([0.1, 0.2, 0.8, 0.12, 0.011])
    {'p': 0.0168..., 'C': 2.1228..., 'OK': True}

    >>> stouffer_liptak([0.5, 0.5, 0.5, 0.5, 0.5])
    {'p': 0.5, 'C': 0.0, 'OK': True}

    >>> stouffer_liptak([0.5, 0.1, 0.5, 0.5, 0.5])
    {'p': 0.28..., 'C': 0.57..., 'OK': True}

    >>> stouffer_liptak([0.5, 0.1, 0.1, 0.1, 0.5])
    {'p': 0.042..., 'C': 1.719..., 'OK': True}

    >>> stouffer_liptak([0.5], np.matrix([[1]]))
    {'p': 0.5...}
    """
    L = len(pvals)
    pvals = np.array(pvals, dtype=np.float64)
    pvals[pvals == 1] = 1.0 - 9e-16
    qvals = qnorm(1.0 - pvals, loc=0, scale=1).reshape(L, 1)
    if any(np.isinf(qvals)):
        raise Exception("bad values: %s" % pvals[list(np.isinf(qvals))])

    # dont do the correction unless sigma is specified.
    result = {"OK": True}
    if not sigma is None:
        try:
            C = chol(sigma)
            Cm1 = np.asmatrix(C).I # C^-1
            # qstar
            qvals = Cm1 * qvals
            result["OK"] = True
        except LinAlgError as e:
            print(e, file=sys.stderr)
            # can't do the correction: sigma is non-invertible, so shrink it
            sigma *= 0.95
            np.fill_diagonal(sigma, 0.99)
            result["OK"] = False

    # tail as in the complete variant above (the source cut off here)
    Cp = qvals.sum() / np.sqrt(len(qvals))
    pstar = norm.sf(Cp)  # get the right tail
    result.update({"C": Cp, "p": pstar})
    return result
Example #21
    def post(self, hyp, covfunc, X, y):
        """ Generic function to compute posterior distribution.
        """

        if len(X.shape) == 1:
            X = X[:, np.newaxis]
        self.N, self.D = X.shape

        if not self._updatepost(hyp, covfunc):
            print("hyperparameters have not changed, using exising posterior")
            return

        # hyperparameters
        sn2 = np.exp(2 * hyp[0])  # noise variance
        theta = hyp[1:]  # (generic) covariance hyperparameters

        if self.verbose:
            print("estimating posterior ... | hyp=", hyp)

        self.K = covfunc.cov(theta, X)
        self.L = chol(self.K + sn2 * np.eye(self.N))
        self.alpha = solve(self.L.T, solve(self.L, y))
        self.hyp = hyp
        self.covfunc = covfunc
Example #22
def log_det(A):
    """
        LOG(DET)
    
    [Description]
    Returns the log(det(A)), but more stable and accurate.
    
    [Input]
    A : input array (D x D).
    
    [Output]
    X : log(det(A)) (scalar).
    
    References:
        (N/A)
    
    Copyright (c) Michail D. Vrettas - November 2015.
    
    Last Updated: November 2015.
    """
    # Check if input is empty.
    if (A is None):
        print("Input matrix is None!")
        return None

    # Check if input is scalar.
    if np.isscalar(A):
        return log(A)
    else:
        # If the input is vector.
        if (len(A.shape) == 1):
            # Transform it to diagonal matrix
            A = np.diag(A)

    # --->
    return 2 * np.sum(np.log(chol(A).diagonal()))
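A quick check against the naive computation (a sketch, assuming NumPy and that chol returns a triangular factor of A):

import numpy as np

A = np.cov(np.random.randn(5, 50))
print(np.isclose(log_det(A), np.log(np.linalg.det(A))))   # expect True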
Example #23
def ut_approx(f, xbar, Pxx, *args):
    """
        UNSCENTED TRANSFORMATION
    
    [Description]
    This method computes the approximate values for the mean and
    the covariance of a multivariate random variable. To achieve
    that, the "Unscented Transformation" (UT) is used.
    
    [Input]
    f     : function of the nonlinear transformation of the state vector.
    xbar  : current mean of the state vector        (1 x D).
    Pxx   : current covariance of the state vector (D x D).
    *args : additional parameter for the "f" function.
    
    [Output]
    ybar  : estimated mean after nonlinear transformation       (1 x K).
    Pyy   : estimated covariance after nonlinear transformation (K x K).
    
    Reference:
    @INPROCEEDINGS{Julier99thescaled,
        author = {Simon J. Julier},
        title = {The Scaled Unscented Transformation},
        booktitle = {},
        year = {1999},
        pages = {4555--4559}
    }
    
    Copyright (c) Michail D. Vrettas, PhD - November 2015.
    
    Last Update: November 2015.
    """

    # Get the dimensions of the state vector.
    if (len(xbar.shape) == 1):
        D = xbar.shape[0]
    else:
        D = xbar.shape[1]

    # Total number of sigma points.
    M = (2 * D + 1)

    # Scaling factor.
    k = 1.05 * D

    # Use Cholesky to get the lower triangular matrix.
    try:
        sPxx = chol((D + k) * Pxx).T
    except LinAlgError:
        sPxx = chol(Pxx * eye(D)).T

    # Replicate the array.
    xMat = np.tile(xbar, (D, 1))

    # Put all sigma points together.
    chi = np.concatenate((xbar[np.newaxis, :], (xMat + sPxx), (xMat - sPxx)))

    # Compute the weights.
    wList = [k / (D + k)]
    wList.extend([1.0 / (2.0 * (D + k))] * (M - 1))
    weights = np.reshape(np.array(wList), (1, M))

    # Propagate the new points through the nonlinear transformation.
    Y = f(chi, *args)

    # Compute the new approximate mean.
    ybar = weights.dot(Y)

    # Compute the approximate covariance.
    wM = np.eye(M) - np.tile(weights, (M, 1))
    Q = wM.dot(np.diag(weights.ravel())).dot(wM.T)

    # Compute the new approximate covariance.
    Pyy = Y.T.dot(Q).dot(Y)

    # --->
    return ybar, Pyy
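A short usage sketch (assuming NumPy; f must map an M x D array of sigma points to an M x K array row-wise). For x ~ N(0, I) in two dimensions, E[x**2] = 1 per coordinate, which the UT recovers here:

import numpy as np

def f(chi):
    return chi ** 2        # elementwise square, applied to each sigma point

ybar, Pyy = ut_approx(f, np.zeros(2), np.eye(2))
print(ybar)                # approximately [[1., 1.]]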
Example #24
def coint_johansen(x, p, k, coint_trend=None):

    #    % error checking on inputs
    #    if (nargin ~= 3)
    #     error('Wrong # of inputs to johansen')
    #    end
    nobs, m = x.shape

    #why this?  f is detrend transformed series, p is detrend data
    if (p > -1):
        f = 0
    else:
        f = p

    if coint_trend is not None:
        f = coint_trend  #matlab has separate options

    x     = detrend(x,p)
    dx    = tdiff(x,1, axis=0)
    #dx    = trimr(dx,1,0)
    z     = mlag(dx,k)#[k-1:]
    # print z.shape
    z = trimr(z,k,0)
    z     = detrend(z,f)
    # print dx.shape
    dx = trimr(dx,k,0)

    dx    = detrend(dx,f)
    #r0t   = dx - z*(z\dx)
    r0t   = resid(dx, z)  #diff on lagged diffs
    #lx = trimr(lag(x,k),k,0)
    lx = lag(x,k)
    lx = trimr(lx, 1, 0)
    dx    = detrend(lx,f)
    # print 'rkt', dx.shape, z.shape
    #rkt   = dx - z*(z\dx)
    rkt   = resid(dx, z)  #level on lagged diffs
    skk   = np.dot(rkt.T, rkt) / rows(rkt)
    sk0   = np.dot(rkt.T, r0t) / rows(rkt)
    s00   = np.dot(r0t.T, r0t) / rows(r0t)
    sig   = np.dot(sk0, np.dot(inv(s00), (sk0.T)))
    tmp   = inv(skk)
    #du, au = eig(np.dot(tmp, sig))
    au, du = eig(np.dot(tmp, sig))  #au is eval, du is evec
    #orig = np.dot(tmp, sig)

    #% Normalize the eigen vectors such that (du'skk*du) = I
    temp   = inv(chol(np.dot(du.T, np.dot(skk, du))))
    dt     = np.dot(du, temp)


    #JP: the next part can be done much  easier

    #%      NOTE: At this point, the eigenvectors are aligned by column. To
    #%            physically move the column elements using the MATLAB sort,
    #%            take the transpose to put the eigenvectors across the row

    #dt = transpose(dt)

    #% sort eigenvalues and vectors

    #au, auind = np.sort(diag(au))
    auind = np.argsort(au)
    #a = flipud(au)
    aind = flipud(auind)
    a = au[aind]
    #d = dt[aind,:]
    d = dt[:,aind]

    #%NOTE: The eigenvectors have been sorted by row based on auind and moved to array "d".
    #%      Put the eigenvectors back in column format after the sort by taking the
    #%      transpose of "d". Since the eigenvectors have been physically moved, there is
    #%      no need for aind at all. To preserve existing programming, aind is reset back to
    #%      1, 2, 3, ....

    #d  =  transpose(d)
    #test = np.dot(transpose(d), np.dot(skk, d))

    #%EXPLANATION:  The MATLAB sort function sorts from low to high. The flip realigns
    #%auind to go from the largest to the smallest eigenvalue (now aind). The original procedure
    #%physically moved the rows of dt (to d) based on the alignment in aind and then used
    #%aind as a column index to address the eigenvectors from high to low. This is a double
    #%sort. If you wanted to extract the eigenvector corresponding to the largest eigenvalue by,
    #%using aind as a reference, you would get the correct eigenvector, but with sorted
    #%coefficients and, therefore, any follow-on calculation would seem to be in error.
    #%If alternative programming methods are used to evaluate the eigenvalues, e.g. Frame method
    #%followed by a root extraction on the characteristic equation, then the roots can be
    #%quickly sorted. One by one, the corresponding eigenvectors can be generated. The resultant
    #%array can be operated on using the Cholesky transformation, which enables a unit
    #%diagonalization of skk. But nowhere along the way are the coefficients within the
    #%eigenvector array ever changed. The final value of the "beta" array using either method
    #%should be the same.


    #% Compute the trace and max eigenvalue statistics */
    lr1 = zeros(m)
    lr2 = zeros(m)
    cvm = zeros((m,3))
    cvt = zeros((m,3))
    iota = ones(m)
    t, junk = rkt.shape
    for i in range(0, m):
        tmp = trimr(np.log(iota-a), i ,0)
        lr1[i] = -t * np.sum(tmp, 0)  #columnsum ?
        #tmp = np.log(1-a)
        #lr1[i] = -t * np.sum(tmp[i:])
        lr2[i] = -t * np.log(1-a[i])
        cvm[i,:] = c_sja(m-i,p)
        cvt[i,:] = c_sjt(m-i,p)
        aind[i]  = i
    #end

    result = Holder()
    #% set up results structure
    #estimation results, residuals
    result.rkt = rkt
    result.r0t = r0t
    result.eig = a
    result.evec = d  #transposed compared to matlab ?
    result.lr1 = lr1
    result.lr2 = lr2
    result.cvt = cvt
    result.cvm = cvm
    result.ind = aind
    result.meth = 'johansen'

    return result
Example #25
    def _chol_sigma_u(self):
        return chol(self.sigma_u)
Example #26
import numpy as np
from numpy.random import randn
from numpy.random import standard_normal as randnt
from numpy.linalg import cholesky as chol

import chol_diff

# Example setup:
N = 500
A = np.cov(randn(N, 3*N)) # a symmetric +ve definite matrix
L = chol(A)               # lower-triangular Cholesky decomposition
Lbar = np.tril(randnt(L.shape)) # declare what dF/dL is for some function F

# Push the derivative back through the Cholesky, Abar = dF/d(tril(A))
Abar = chol_diff.chol_rev(L, Lbar)

# Perturb the input, look at the perturbation of the output and check
# for consistency.
dA = np.cov(randn(N, 3*N))
eps = 1e-5
dL = (chol(A + (eps/2)*dA) - chol(A - (eps/2)*dA)) / eps
dF1 = np.dot(dL.ravel(), Lbar.ravel())
dF2 = np.dot(dA.ravel(), Abar.ravel())

if chol_diff.FORTRAN_COMPILED:
    print('Using Fortran version:')
else:
    print('Using pure Python version:')
print('Error: %g' % float(dF1 - dF2))

Example #27
    H = P * A

    # Calculate single-antenna MI
    sa_pfm[w_id] = log2(1 + w0/N0)

    # Calculate spatial-modulation MI
    sym_dm_mi = 1/2 * (log2(1 + w0/N0) + log2(1 + w1/N0))

    ant_dm_mi = 0
    sig_mat_0 = N0*np.eye(Nr) + H[:, 0] * H[:, 0].H
    sig_mat_1 = N0*np.eye(Nr) + H[:, 1] * H[:, 1].H
    for n in range(2):
        sig_mat_n = N0*np.eye(Nr) + H[:, n] * H[:, n].H
        zero_mu = np.zeros(shape=[Nr, 1], dtype='float')
        
        sig_mat_n_half = chol(sig_mat_n)
        y_set = sig_mat_n_half * cgss_vec_set

        temp_set = log2(c_gauss_2d(y_set, zero_mu, sig_mat_n)*2 / (c_gauss_2d(y_set, zero_mu, sig_mat_0) + c_gauss_2d(y_set, zero_mu, sig_mat_1)))
        ant_dm_mi += 1/2 * np.mean(temp_set)

    ant_dm_mi_apprx = 1 - log2(exp(1))/2 * (2+1/N0) / ((1+w0/(2*N0))*(1+w1/(2*N0)) - w0*w1*g_phi/(4*N0**2 * Nr**2))
    
    sm_pfm[w_id] = sym_dm_mi + ant_dm_mi
    sm_pfm_aprx[w_id] = sym_dm_mi + ant_dm_mi_apprx

plt.plot(w_rng, sa_pfm, 'b-', label='single-ant')
plt.plot(w_rng, sm_pfm, 'r-', label='sm')
plt.plot(w_rng, sm_pfm_aprx, 'k--', label='sm apprx')
plt.legend()
plt.grid()
Example #28
if __name__ == '__main__':
    testmod.set_device(0)

    n = 1000  # randn requires an integer count
    k = 16

    data = randn(n, k).astype(np.float32)
    mean = randn(k)
    cov = np.array(util.random_cov(k), dtype=np.float32)

    j = 32

    padded_data = util.pad_data(data)

    chol_sigma = chol(cov)
    ichol_sigma = L.inv(chol_sigma)
    logdet = np.log(np.linalg.det(cov))

    means = (mean,) * j
    covs = (ichol_sigma,) * j
    logdets = (logdet,) * j

    packed_params = util.pack_params(means, covs, logdets)

    cpu_func = lambda: testmod.cpu_mvnpdf(padded_data, packed_params, k).squeeze()
    gpu_func = lambda: testmod._mvnpdf(padded_data, packed_params, k).squeeze()

    print(cpu_func())
    print(gpu_func())
Example #29
# since then there is L such that L^T.L = K, thus K is a Gramian and
# hence p.s.d. by construction.  (Symmetry is trivial; the >= 0 part holds
# because every component of the rows of L is real, so its square
# is >= 0.)

'''
semipositive definite proof: The only restriction on the kernel function is that the covariance matrix given by
(6.62) must be positive definite. If λi is an eigenvalue of K,
then the corresponding eigenvalue of C will be λi + β−1.
It is therefore sufficient that the kernel matrix k(xn,xm) be
positive semidefinite for any pair of points xn and xm, so that λi ≥ 0,
because any eigenvalue λi that is zero will still give rise to a positive eigenvalue for
C because β > 0. [Bishop, p. 308]
'''

L = chol(K)
# this gives an error. is something wrong with the kernel?
l = rank(L)
# it would be surprising if this was >= 0

# q4
mu = np.zeros(len(x))
# for some reason it errors on singularity?
prior = mv(mu, K, allow_singular=1)
samples = prior.rvs(5)

fig, ax = subplots(1, 1)
for i in samples:
    ax.plot(x, i)
savefig('../Figures/ex_1_test')
Example #30
p, N = y.shape
x = np.arange(1, N + 1) / N
c = 100
d = 1
r = 1e-05
K = np.zeros((N, N))

for ii in range(N):
    for jj in range(N):
        dist_ii_jj = np.abs(x[ii] - x[jj])
        K[ii, jj] = d * np.exp(-c * (dist_ii_jj ** 2))

K = K + np.diag(r * np.ones(N))  # add jitter to the diagonal
print(np.diag(K))
invK = inv(K)
logdetK = 2 * np.sum(np.log(np.diag(chol(K))))

prior_params.K.c_prior = 1
prior_params.K.invK = invK
prior_params.K.K = K
prior_params.K.logdetK = logdetK
prior_params.sig.a_sig = 1
prior_params.sig.b_sig = 0.1
prior_params.hypers.a_phi = 1.5
prior_params.hypers.b_phi = 1.5
prior_params.hypers.a1 = 10
prior_params.hypers.a2 = 10

settings.L = 10
settings.k = 20
settings.Niter = 10000
Example #31
    def __init__(self, C):
        self.L = chol(C)  # left Cholesky factor of C (lower triangular)
        self.C = C
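A sketch of what such a cached factor is typically used for, drawing correlated Gaussian samples (assuming chol is NumPy's lower-triangular cholesky; the sampling shown is illustrative, not a method of the original class):

import numpy as np
from numpy.linalg import cholesky as chol

C = np.array([[1.0, 0.8], [0.8, 1.0]])
L = chol(C)
z = np.random.randn(2, 10_000)
x = L @ z                  # columns are draws from N(0, C)
print(np.cov(x))           # close to C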