Example #1
def gpfa_model(x_a, x_s, n_a, n_s, D):
    # Option 1: one Delta node per latent dimension, collected in lists
    amp = [None] * D
    ls = [None] * D
    cf = [None] * D
    for i in range(D):
        amp[i] = EF.Delta(name='amplitude-' + str(i))
        ls[i] = EF.Delta(name='lengthscale-' + str(i))
        cf[i] = CF.PiecewisePolynomial2(amp[i], ls[i])
    cf_a = CF.Multiple(cf)
    a = GP.GaussianProcess(0, cf_a, name='a')
    indices = np.arange(D * n_a).reshape(
        (n_a, D))  # maybe some better way to express this?
    x_A = [x_a] * D
    A = GP.ProcessToVector(a, x_A, indices)
    # Option 2: single Delta nodes with plates over the D latent dimensions
    amp = EF.Delta(name='amplitude', plates=(D, ))
    ls = EF.Delta(name='lengthscale', plates=(D, ))
    cf = CF.PiecewisePolynomial2(amp, ls)
    cf_a = CF.Multiple(cf)
    a = GP.GaussianProcess(0, cf_a, name='a')
    A = GP.ProcessToVector(a, x_a)
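
Neither variant above runs on its own: EF, CF and GP are module aliases from bayespy's experimental Gaussian-process code, and the two blocks are alternative drafts of the same construction. Both encode D independent latent GPs, one per latent dimension, evaluated at the same inputs x_a and stacked into a single vector. Below is a minimal NumPy-only sketch of the resulting joint covariance, which is block-diagonal over the D processes; it uses a squared-exponential kernel as a stand-in for CF.PiecewisePolynomial2, and se_cov / gpfa_latent_cov are hypothetical helper names written for illustration, not bayespy API.

import numpy as np

def se_cov(x1, x2, amp, ls):
    # Squared-exponential covariance: amp^2 * exp(-0.5 * (x1 - x2)^2 / ls^2)
    d = x1[:, None] - x2[None, :]
    return amp**2 * np.exp(-0.5 * (d / ls)**2)

def gpfa_latent_cov(x_a, amps, lss):
    # Joint covariance of D independent latent GPs evaluated at x_a,
    # stacked as [f_1(x_a), ..., f_D(x_a)]: one diagonal block per process.
    blocks = [se_cov(x_a, x_a, a, l) for (a, l) in zip(amps, lss)]
    n = len(x_a)
    D = len(blocks)
    K = np.zeros((D * n, D * n))
    for i, B in enumerate(blocks):
        K[i*n:(i+1)*n, i*n:(i+1)*n] = B
    return K

x_a = np.linspace(0, 10, 50)
K = gpfa_latent_cov(x_a, amps=[1.0, 0.5, 2.0], lss=[1.0, 2.0, 0.5])
print(K.shape)  # (150, 150)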
Example #2
    def __init__(self, m, k, k_sparse=None, pseudoinputs=None, **kwargs):

        self.x = np.array([])
        self.f = np.array([])
        ## self.x_obs = np.zeros((0,1))
        ## self.f_obs = np.zeros((0,))

        if pseudoinputs is not None:
            pseudoinputs = EF.NodeConstant([pseudoinputs],
                                           dims=[np.shape(pseudoinputs)])

        # By default, posterior == prior
        self.m = None  #m
        self.k = None  #k

        # If lists of mean/covariance functions are given, wrap them into
        # joint (multi-output) mean and covariance nodes of equal length.
        if isinstance(k, list) and isinstance(m, list):
            if len(k) != len(m):
                raise ValueError(
                    'The number of mean and covariance functions must be equal.'
                )
            k = CF.Multiple(k)
            m = Multiple(m)
        elif isinstance(k, list):
            D = len(k)
            k = CF.Multiple(k)
            m = Multiple(D * [m])
        elif isinstance(m, list):
            D = len(m)
            k = CF.Multiple(D * [k])
            m = Multiple(m)

        # Ignore plates
        EF.NodeVariable.__init__(self,
                                 m,
                                 k,
                                 k_sparse,
                                 pseudoinputs,
                                 plates=(),
                                 dims=[(np.inf, ), (np.inf, np.inf)],
                                 **kwargs)
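
The cascade of isinstance checks above implements one normalization rule: if either the mean or the covariance is given as a list, both are promoted to equal-length lists before being wrapped in the joint Multiple nodes, and mismatched lengths are rejected. A standalone sketch of just that rule (normalize_mean_cov is a hypothetical helper written for illustration, not part of bayespy):

def normalize_mean_cov(m, k):
    # Promote m and k to equal-length lists, mirroring the branching above.
    if isinstance(k, list) and isinstance(m, list):
        if len(k) != len(m):
            raise ValueError('The number of mean and covariance functions must be equal.')
        return list(m), list(k)
    if isinstance(k, list):
        return [m] * len(k), list(k)
    if isinstance(m, list):
        return list(m), [k] * len(m)
    return m, k  # both are single functions: leave untouched

print(normalize_mean_cov('m', ['k1', 'k2']))  # (['m', 'm'], ['k1', 'k2'])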
Example #3
File: vmp.py Project: willu47/bayespy
def test_gp():

    # Generate data
    func = lambda x: np.sin(x*2*np.pi/5)
    x = np.random.uniform(low=0, high=10, size=(100,))
    f = func(x)
    f = f + np.random.normal(0, 0.2, np.shape(f))
    plt.clf()
    plt.plot(x,f,'r+')
    #plt.plot(x,y,'r+')

    # Construct model
    ls = EF.NodeConstantScalar(1.5, name='lengthscale')
    amp = EF.NodeConstantScalar(2.0, name='amplitude')
    noise = EF.NodeConstantScalar(0.6, name='noise')
    K = CF.SquaredExponential(amp, ls)
    K_noise = CF.Delta(noise)
    K_sum = CF.Sum(K, K_noise)

    M = GP.Constant(lambda x: (x/10-2)*(x/10+1))

    method = 'multi'

    if method == 'sum':
        # Sum GP
        F = GP.GaussianProcess(M, K_sum)

    elif method == 'multi':
        # Joint for latent function and observation process
        M_multi = GP.Multiple([M, M])

        K_zeros = CF.Zeros()

        
        #K_multi = CF.Multiple([[K, K],[K,K_sum]])
        K_multi1 = CF.Multiple([[K, K],[K,K]])
        #K_multi2 = CF.Multiple([[None, None],[None, K_noise]])
        K_multi2 = CF.Multiple([[K_zeros, K_zeros],[K_zeros,K_noise]])
        
        xp = np.arange(0,5,1)
        F = GP.GaussianProcess(M_multi, K_multi1,
                               k_sparse=K_multi2,
                               #pseudoinputs=[[],xp])
                               pseudoinputs=None)
        #F = GP.GaussianProcess(M_multi, K_multi, pseudoinputs=[[],xp])
        # Observations are from the latter process:
        #xf = np.array([])
        #x_pseudo = [[], x]
        #x_full = [np.array([15, 20]), []]
        #xy = x
        #x = [xf, xy]
        #f = np.concatenate([func(xf), f])
        #f_pseudo = f
        #f_full = func(x_full)
        x = [[], x]
        

    # Inference
    #F.observe(x_pseudo, f_pseudo, pseudo=True)
    F.observe(x, f)
    utils.vb_optimize_nodes(ls, amp, noise)
    F.update()
    u = F.get_parameters()

    print('parameters')
    print(ls.name)
    print(ls.u[0])
    print(amp.name)
    print(amp.u[0])
    print(noise.name)
    print(noise.u[0])

    #print(F.lower_bound_contribution())

    # Posterior predictions
    xh = np.arange(-5, 20, 0.1)
    if method == 'multi':
        # Choose which process you want to examine:
        (fh, varfh) = u([[],xh], covariance=1)
        #(fh, varfh) = u([xh,[]], covariance=1)
    else:
        (fh, varfh) = u(xh, covariance=1)

    #print(fh)
    #print(np.shape(fh))
    #print(np.shape(varfh))

    # Clip small negative variances caused by numerical error
    varfh[varfh<0] = 0
    errfh = np.sqrt(varfh)
    #print(varfh)
    #print(errfh)
    m_errorplot(xh, fh, errfh, errfh)
    
    return
    
    # NOTE: the code below is unreachable (it follows the return); it is an
    # earlier draft of the same GP construction kept in the original file.
    # Construct a GP
    k = gp_cov_se(magnitude=theta1, lengthscale=theta2)
    f = NodeGP(0, k)
    f.observe(x, y)
    f.update()
    (mp, kp) = f.get_parameters()
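
For the 'sum' branch of this test, the pipeline of F.observe(x, f), F.update(), u = F.get_parameters() and then u(xh, covariance=1) amounts to standard GP regression with a squared-exponential covariance plus i.i.d. observation noise. Below is a self-contained NumPy sketch of that computation; it is not bayespy's API (gp_predict is a hypothetical helper), it treats the noise parameter as a standard deviation, and it predicts the noise-free latent function at the new inputs.

import numpy as np

def se_kernel(x1, x2, amp=2.0, ls=1.5):
    # Squared-exponential covariance with amplitude amp and lengthscale ls
    d = x1[:, None] - x2[None, :]
    return amp**2 * np.exp(-0.5 * (d / ls)**2)

def gp_predict(x, f, xh, amp=2.0, ls=1.5, noise=0.6,
               mean=lambda t: (t/10 - 2) * (t/10 + 1)):
    # Posterior mean and variance of the latent GP given noisy observations,
    # mirroring GP.GaussianProcess(M, K_sum) with K_sum = SE + noise delta.
    K = se_kernel(x, x, amp, ls) + noise**2 * np.eye(len(x))
    Ks = se_kernel(xh, x, amp, ls)
    Kss = se_kernel(xh, xh, amp, ls)
    alpha = np.linalg.solve(K, f - mean(x))
    fh = mean(xh) + Ks @ alpha
    varfh = np.diag(Kss - Ks @ np.linalg.solve(K, Ks.T))
    return fh, np.maximum(varfh, 0.0)  # clip tiny negative variances

x = np.random.uniform(0, 10, size=100)
f = np.sin(x * 2 * np.pi / 5) + np.random.normal(0, 0.2, size=100)
xh = np.arange(-5, 20, 0.1)
fh, varfh = gp_predict(x, f, xh)
errfh = np.sqrt(varfh)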