# Example 1
def test_gp():
    """Demo: fit a GP to noisy samples of a sinusoid and plot predictions.

    Builds either a single GP with a summed covariance ('sum') or a joint
    model over a latent process and an observation process ('multi'),
    optimizes the hyperparameter nodes, and plots the posterior mean with
    error bars.

    NOTE(review): depends on project modules EF/CF/GP/utils and on the
    helpers plt/m_errorplot imported elsewhere in this file.
    """
    # Generate noisy observations from a sinusoid on [0, 10]
    func = lambda x: np.sin(x*2*np.pi/5)
    x = np.random.uniform(low=0, high=10, size=(100,))
    f = func(x)
    f = f + np.random.normal(0, 0.2, np.shape(f))
    plt.clf()
    plt.plot(x,f,'r+')

    # Hyperparameter nodes (optimized below by vb_optimize_nodes)
    ls = EF.NodeConstantScalar(1.5, name='lengthscale')
    amp = EF.NodeConstantScalar(2.0, name='amplitude')
    noise = EF.NodeConstantScalar(0.6, name='noise')
    # Covariance functions: smooth latent kernel, white-noise kernel, sum
    K = CF.SquaredExponential(amp, ls)
    K_noise = CF.Delta(noise)
    K_sum = CF.Sum(K, K_noise)

    # Constant (quadratic) prior mean function
    M = GP.Constant(lambda x: (x/10-2)*(x/10+1))

    method = 'multi'

    if method == 'sum':
        # Single GP whose covariance is latent + noise
        F = GP.GaussianProcess(M, K_sum)

    elif method == 'multi':
        # Joint model for the latent function and the observation process
        M_multi = GP.Multiple([M, M])

        K_zeros = CF.Zeros()

        # Dense part: latent kernel everywhere; sparse part: noise only on
        # the observation process (bottom-right block)
        K_multi1 = CF.Multiple([[K, K], [K, K]])
        K_multi2 = CF.Multiple([[K_zeros, K_zeros], [K_zeros, K_noise]])

        F = GP.GaussianProcess(M_multi, K_multi1,
                               k_sparse=K_multi2,
                               pseudoinputs=None)
        # Observations belong to the second (observation) process, so the
        # first process gets an empty input list
        x = [[], x]

    # Inference: condition on data, optimize hyperparameters, update
    F.observe(x, f)
    utils.vb_optimize_nodes(ls, amp, noise)
    F.update()
    u = F.get_parameters()

    print('parameters')
    print(ls.name)
    print(ls.u[0])
    print(amp.name)
    print(amp.u[0])
    print(noise.name)
    print(noise.u[0])

    # Posterior predictions on a dense grid extending beyond the data
    xh = np.arange(-5, 20, 0.1)
    if method == 'multi':
        # Query the latent process (first slot); the observation process
        # would be u([xh,[]], ...)
        (fh, varfh) = u([[],xh], covariance=1)
    else:
        (fh, varfh) = u(xh, covariance=1)

    # Clamp tiny negative variances from numerical error before sqrt
    varfh[varfh<0] = 0
    errfh = np.sqrt(varfh)
    m_errorplot(xh, fh, errfh, errfh)

    return
# Example 2
def test_sparse_gp():
    """Demo: sparse GP regression on a large noisy sinusoid (N = 10000).

    Uses a compactly supported (piecewise polynomial) latent covariance so
    the joint latent/observation model stays sparse, optimizes the
    hyperparameter nodes, and plots the posterior mean with error bars.

    NOTE(review): depends on project modules EF/CF/GP/utils and on the
    helpers plt/m_errorplot imported elsewhere in this file.
    """
    ## Data: noisy observations of a long sinusoid
    n = 10000
    sinusoid = lambda t: np.sin(t*2*np.pi/20)
    inputs = np.random.uniform(low=0, high=n, size=(n,))
    latent = sinusoid(inputs)
    targets = latent + np.random.normal(0, 0.2, np.shape(latent))

    # Show the raw data
    plt.clf()
    plt.plot(inputs, targets, 'r+')

    ## Model construction

    # Hyperparameter nodes (optimized below)
    lengthscale = EF.NodeConstantScalar(3, name='lengthscale')
    amplitude = EF.NodeConstantScalar(2.0, name='amplitude')
    noise_scale = EF.NodeConstantScalar(0.6, name='noise')

    # Latent-process covariance: compactly supported, hence sparse
    cov_latent = CF.PiecewisePolynomial2(amplitude, lengthscale)
    # White-noise covariance for the observation process
    cov_noise = CF.Delta(noise_scale)
    # Observation covariance = latent + noise
    cov_obs = CF.Sum(cov_latent, cov_noise)

    # Constant (quadratic) prior mean, shared by both processes
    mean = GP.Constant(lambda t: (t/10-2)*(t/10+1))

    # Joint GP over [latent, observation] processes
    model = GP.GaussianProcess(mean, [[cov_latent, cov_latent],
                                      [cov_latent, cov_obs]])

    ## Inference: observations attach to the second (observation) process
    model.observe([[], inputs], targets)
    utils.vb_optimize_nodes(lengthscale, amplitude, noise_scale)
    model.update()
    posterior = model.get_parameters()

    ## Results

    # Optimized hyperparameter values
    print('parameters')
    for node in (lengthscale, amplitude, noise_scale):
        print(node.name, node.u[0])

    # Posterior predictions for the latent process on a dense grid
    grid = np.arange(np.min(inputs)-5, np.max(inputs)+10, 0.1)
    (pred_mean, pred_var) = posterior([[], grid], covariance=1)

    # Clamp tiny negative variances from numerical error before sqrt
    pred_var[pred_var<0] = 0
    pred_std = np.sqrt(pred_var)
    m_errorplot(grid, pred_mean, pred_std, pred_std)

    return