Example #1
def run_demo():
    LG.basicConfig(level=LG.DEBUG)
    SP.random.seed(10)

    #1. create toy data
    x, y, z, sigma, X, actual_inv, L = create_toy_data()
    n_dimensions = 1
    n_terms = 3
    # build GP
    likelihood = lik.GaussLikISO()
    covar_parms = SP.log([1, 1, 1E-5])
    hyperparams = {
        'covar': covar_parms,
        'lik': SP.log([sigma]),
        'warping': (1E-2 * SP.random.randn(n_terms, 3))
    }

    SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
    muCF = mu.MuCF(N=X.shape[0])
    covar = combinators.SumCF([SECF, muCF])
    warping_function = TanhWarpingFunction(n_terms=n_terms)
    mean_function = LinMeanFunction(X=SP.ones([x.shape[0], 1]))
    hyperparams['mean'] = 1E-2 * SP.randn(1)
    bounds = {}
    bounds.update(warping_function.get_bounds())

    gp = WARPEDGP(warping_function=warping_function,
                  mean_function=mean_function,
                  covar_func=covar,
                  likelihood=likelihood,
                  x=x,
                  y=z)
    opt_model_params = opt.opt_hyper(gp,
                                     hyperparams,
                                     bounds=bounds,
                                     gradcheck=True)[0]

    print "WARPED GP (neg) likelihood: ", gp.LML(hyperparams)

    #hyperparams['mean'] = SP.log(1)
    PL.figure()
    PL.plot(z)
    PL.plot(warping_function.f(y, opt_model_params['warping']))
    PL.legend(["real function", "learnt function"])

    PL.figure()
    PL.plot(actual_inv)
    PL.plot(warping_function.f_inv(gp.y, opt_model_params['warping']))
    PL.legend(['real inverse', 'learnt inverse'])

    hyperparams.pop("warping")
    hyperparams.pop("mean")
    gp = GP(covar, likelihood=likelihood, x=x, y=y)
    opt_model_params = opt.opt_hyper(gp, hyperparams, gradcheck=False)[0]
    print "GP (neg) likelihood: ", gp.LML(hyperparams)
Example #2
def run_demo():
    LG.basicConfig(level=LG.INFO)

    #1. simulate data from a linear PCA model
    N = 25
    K = 5
    D = 200

    SP.random.seed(1)
    S = SP.random.randn(N,K)
    W = SP.random.randn(D,K)

    Y = SP.dot(W, S.T).T
    Y += 0.5 * SP.random.randn(N, D)

    #use "standard PCA"
    [Spca,Wpca] = gplvm.PCA(Y,K)

    #reconstruction
    Y_ = SP.dot(Spca,Wpca.T)

    if 1:
        #use linear kernel
        covariance = linear.LinearCFISO(n_dimensions=K)
        hyperparams = {'covar': SP.log([1.2])}
    if 0:
        #use ARD kernel
        covariance = se.SqexpCFARD(n_dimensions=K)
        hyperparams = {'covar': SP.log([1]*(K+1))}

    #initialize X with the PCA solution (a random initialization also works):
    #X0 = SP.random.randn(N, K)
    X0 = Spca
    hyperparams['x'] = X0
    
    #standard Gaussian noise
    likelihood = lik.GaussLikISO()
    hyperparams['lik'] = SP.log([0.1])
    g = gplvm.GPLVM(covar_func=covariance,
                    likelihood=likelihood,
                    x=X0,
                    y=Y,
                    gplvm_dimensions=SP.arange(X0.shape[1]))

    #specify optimization bounds for the single GaussLikISO noise parameter:
    bounds = {}
    bounds['lik'] = SP.array([[-5., 5.]])

    print "running standard gplvm"
    [opt_hyperparams, opt_lml2] = opt.opt_hyper(g,
                                                hyperparams,
                                                bounds=bounds,
                                                gradcheck=False)

    print "optimized latent X:"
    print opt_hyperparams['x']
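
gplvm.PCA(Y, K) above returns scores Spca (N x K) and weights Wpca (D x K) whose product reconstructs Y. Below is a minimal SVD-based sketch of such a decomposition; the exact centering and scaling conventions inside gplvm.PCA are assumptions.

import numpy as np

def pca(Y, K):
    #rank-K PCA via SVD: scores S (N, K) and weights W (D, K) such that
    #S.dot(W.T) is the best rank-K approximation of the centered Y
    Ym = Y - Y.mean(axis=0)
    U, s, Vt = np.linalg.svd(Ym, full_matrices=False)
    S = U[:, :K] * s[:K]
    W = Vt[:K].T
    return S, W

N, K, D = 25, 5, 200
Y = np.random.randn(N, K).dot(np.random.randn(K, D))
S, W = pca(Y, K)
print(np.abs(Y - Y.mean(0) - S.dot(W.T)).max())  #~0 for rank-K data
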
Example #3
def run_demo():
    LG.basicConfig(level=LG.INFO)
    PL.figure()

    random.seed(1)

    #0. generate toy data: samples from a superposition of a sine and a linear trend
    n_replicates = 4
    xmin = 1
    xmax = 2.5 * SP.pi

    x1_time_steps = 10
    x2_time_steps = 20

    x1 = SP.zeros(x1_time_steps * n_replicates)
    x2 = SP.zeros(x2_time_steps * n_replicates)

    for i in xrange(n_replicates):
        x1[i * x1_time_steps:(i + 1) * x1_time_steps] = SP.linspace(
            xmin, xmax, x1_time_steps)
        x2[i * x2_time_steps:(i + 1) * x2_time_steps] = SP.linspace(
            xmin, xmax, x2_time_steps)

    C = 2  #offset
    #b = 0.5
    sigma1 = 0.15
    sigma2 = 0.15
    n_noises = 1

    b = 0

    y1 = b * x1 + C + 1 * SP.sin(x1)
    #    dy1 = b   +     1*SP.cos(x1)
    y1 += sigma1 * random.randn(y1.shape[0])
    y1 -= y1.mean()

    y2 = b * x2 + C + 1 * SP.sin(x2)
    #    dy2 = b   +     1*SP.cos(x2)
    y2 += sigma2 * random.randn(y2.shape[0])
    y2 -= y2.mean()

    for i in xrange(n_replicates):
        x1[i * x1_time_steps:(i + 1) * x1_time_steps] += .7 + (i / 2.)
        x2[i * x2_time_steps:(i + 1) * x2_time_steps] -= .7 + (i / 2.)

    x1 = x1[:, SP.newaxis]
    x2 = x2[:, SP.newaxis]

    x = SP.concatenate((x1, x2), axis=0)
    y = SP.concatenate((y1, y2), axis=0)

    #predictions:
    X = SP.linspace(xmin - n_replicates, xmax + n_replicates,
                    100 * n_replicates)[:, SP.newaxis]

    #hyperparameters
    dim = 1
    replicate_indices = []
    for i, xi in enumerate((x1, x2)):
        for rep in SP.arange(i * n_replicates, (i + 1) * n_replicates):
            replicate_indices.extend(SP.repeat(rep, len(xi) / n_replicates))
    replicate_indices = SP.array(replicate_indices)
    n_replicates = len(SP.unique(replicate_indices))

    logthetaCOVAR = [1, 1]
    logthetaCOVAR.extend(SP.repeat(SP.exp(1), n_replicates))
    logthetaCOVAR.extend([sigma1])
    logthetaCOVAR = SP.log(logthetaCOVAR)
    hyperparams = {'covar': logthetaCOVAR}

    SECF = se.SqexpCFARD(dim)
    #noiseCF = noise.NoiseReplicateCF(replicate_indices)
    noiseCF = noise.NoiseCFISO()
    shiftCF = combinators.ShiftCF(SECF, replicate_indices)
    CovFun = combinators.SumCF((shiftCF, noiseCF))

    covar_priors = []
    #scale
    covar_priors.append([lnpriors.lnGammaExp, [1, 2]])
    for i in range(dim):
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])
    #shift
    for i in range(n_replicates):
        covar_priors.append([lnpriors.lnGauss, [0, .5]])
    #noise
    for i in range(n_noises):
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])

    covar_priors = SP.array(covar_priors)
    priors = {'covar': covar_priors}
    Ifilter = {'covar': SP.ones(n_replicates + 3)}

    gpr = GP(CovFun, x=x, y=y)
    opt_model_params = opt_hyper(gpr,
                                 hyperparams,
                                 priors=priors,
                                 gradcheck=False,
                                 Ifilter=Ifilter)[0]

    #predict
    [M, S] = gpr.predict(opt_model_params, X)

    T = opt_model_params['covar'][2:2 + n_replicates]

    PL.subplot(212)
    gpr_plot.plot_sausage(X,
                          M,
                          SP.sqrt(S),
                          format_line=dict(alpha=1, color='g', lw=2, ls='-'))
    gpr_plot.plot_training_data(x,
                                y,
                                shift=T,
                                replicate_indices=replicate_indices,
                                draw_arrows=2)

    PL.suptitle("Example for GPTimeShift with simulated data", fontsize=23)

    PL.title("Regression including time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    ylim = PL.ylim()

    gpr = GP(combinators.SumCF((SECF, noiseCF)), x=x, y=y)
    priors = {'covar': covar_priors[[0, 1, -1]]}
    hyperparams = {'covar': logthetaCOVAR[[0, 1, -1]]}
    opt_model_params = opt_hyper(gpr,
                                 hyperparams,
                                 priors=priors,
                                 gradcheck=False)[0]

    PL.subplot(211)
    #predict
    [M, S] = gpr.predict(opt_model_params, X)

    gpr_plot.plot_sausage(X,
                          M,
                          SP.sqrt(S),
                          format_line=dict(alpha=1, color='g', lw=2, ls='-'))
    gpr_plot.plot_training_data(x, y, replicate_indices=replicate_indices)

    PL.title("Regression without time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    PL.ylim(ylim)

    PL.subplots_adjust(left=.1,
                       bottom=.1,
                       right=.96,
                       top=.8,
                       wspace=.4,
                       hspace=.4)
    PL.show()
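
The key ingredient above is combinators.ShiftCF, which evaluates the SE kernel on inputs shifted by a per-replicate offset; the offsets are optimized like any other hyperparameter and read back as T. A minimal numpy sketch of that construction (the squared-exponential form is standard; pygp's exact parameterization is an assumption):

import numpy as np

def se_kernel(x1, x2, amp, ls):
    #1-D squared-exponential kernel with amplitude amp and length scale ls
    d = x1.reshape(-1, 1) - x2.reshape(1, -1)
    return amp**2 * np.exp(-0.5 * (d / ls)**2)

def shifted_se(x1, x2, rep1, rep2, shifts, amp, ls):
    #each point is moved by its replicate's offset before the kernel is applied
    return se_kernel(x1 - shifts[rep1], x2 - shifts[rep2], amp, ls)

x = np.linspace(0, 2 * np.pi, 40)
rep = np.repeat(np.arange(4), 10)          #4 replicates, 10 points each
shifts = np.array([0.0, 0.3, -0.2, 0.5])   #per-replicate time shifts
K = shifted_se(x, x, rep, rep, shifts, amp=1.0, ls=1.0)
print(K.shape)  #(40, 40)
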
Example #4
    def Itrafo(y):
        return y**(1 / float(3))

    z = trafo(y)  #forward transform; Itrafo above is its inverse
    L = (z.max() - z.min())
    z /= L

    n_terms = 3
    # build GP
    likelihood = lik.GaussLikISO()
    # covar_parms = SP.log([1,1,1E-5])
    covar_parms = SP.log([1, 1])
    hyperparams = {'covar': covar_parms, 'lik': SP.log([sigma])}

    SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
    muCF = mu.MuCF(N=X.shape[0])
    #covar = combinators.SumCF([SECF,muCF])
    covar = SECF
    warping_function = None
    mean_function = None
    bounds = {}
    if 1:
        warping_function = TanhWarpingFunction(n_terms=n_terms)
        hyperparams['warping'] = 1E-2 * SP.random.randn(n_terms, 3)
        bounds.update(warping_function.get_bounds())

    if 0:
        mean_function = LinMeanFunction(X=SP.ones([x.shape[0], 1]))
        hyperparams['mean'] = 1E-2 * SP.randn(1)
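
This fragment sets up a warped GP whose ground-truth warping is known: Itrafo is a cube root, so trafo (not shown in the fragment) is presumably the cube it inverts. A minimal sketch of the inferred transform pair:

import numpy as np

def trafo(y):
    #inferred forward transform: the cube that Itrafo inverts
    return y**3

def Itrafo(y):
    #inverse transform (valid for y >= 0)
    return y**(1 / 3.0)

y = np.linspace(0, 2, 50)
assert np.allclose(Itrafo(trafo(y)), y)
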
Example #5
def run_demo():
    LG.basicConfig(level=LG.INFO)
    random.seed(1)

    #1. create toy data
    [x, y] = create_toy_data()
    n_dimensions = 1

    #2. location of equally spaced predictions
    X = SP.linspace(0, 10, 100)[:, SP.newaxis]

    if 0:
        #old interface where the covariance function and likelihood are one thing:
        #hyperparameters
        covar_parms = SP.log([1, 1, 1])
        hyperparams = {'covar': covar_parms}
        #construct covariance function
        SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
        noiseCF = noise.NoiseCFISO()
        covar = combinators.SumCF((SECF, noiseCF))
        covar_priors = []
        #scale
        covar_priors.append([lnpriors.lnGammaExp, [1, 2]])
        covar_priors.extend([[lnpriors.lnGammaExp, [1, 1]]
                             for i in xrange(n_dimensions)])
        #noise
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])
        priors = {'covar': covar_priors}
        likelihood = None

    if 1:
        #new interface with likelihood parameters decoupled from the covariance function
        likelihood = lik.GaussLikISO()
        covar_parms = SP.log([1, 1])
        hyperparams = {'covar': covar_parms, 'lik': SP.log([1])}
        #construct covariance function
        SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
        covar = SECF
        covar_priors = []
        #scale
        covar_priors.append([lnpriors.lnGammaExp, [1, 2]])
        covar_priors.extend([[lnpriors.lnGammaExp, [1, 1]]
                             for i in xrange(n_dimensions)])
        lik_priors = []
        #noise
        lik_priors.append([lnpriors.lnGammaExp, [1, 1]])
        priors = {'covar': covar_priors, 'lik': lik_priors}

    gp = GP(covar, likelihood=likelihood, x=x, y=y)
    opt_model_params = opt.opt_hyper(gp,
                                     hyperparams,
                                     priors=priors,
                                     gradcheck=False)[0]

    #predict
    [M, S] = gp.predict(opt_model_params, X)

    #create plots
    gpr_plot.plot_sausage(X, M, SP.sqrt(S))
    gpr_plot.plot_training_data(x, y)
    PL.show()
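
gp.predict above returns the posterior mean M and variance S at the test inputs. For a zero-mean GP with SE covariance and Gaussian noise these have the standard closed form below; the numpy sketch is self-contained and is not pygp's implementation.

import numpy as np

def se(a, b, amp, ls):
    d = a.reshape(-1, 1) - b.reshape(1, -1)
    return amp**2 * np.exp(-0.5 * (d / ls)**2)

def gp_predict(x, y, xs, amp, ls, noise):
    #m = Ks K^-1 y,  v = diag(Kss) - diag(Ks K^-1 Ks^T),
    #with K = k(x, x) + noise^2 I
    K = se(x, x, amp, ls) + noise**2 * np.eye(len(x))
    Ks = se(xs, x, amp, ls)
    m = Ks.dot(np.linalg.solve(K, y))
    v = amp**2 - np.sum(Ks * np.linalg.solve(K, Ks.T).T, axis=1)
    return m, v

x = np.random.uniform(0, 10, 30)
y = np.sin(x) + 0.1 * np.random.randn(30)
xs = np.linspace(0, 10, 100)
M, S = gp_predict(x, y, xs, amp=1.0, ls=1.0, noise=0.1)
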
Example #6
SP.random.seed(1)

n_dimensions = 1
n_samples = 30
X = SP.randn(n_samples, n_dimensions)
y = SP.dot(X, SP.randn(n_dimensions, 1))
y += 0.2 * SP.randn(y.shape[0], y.shape[1])

covar_params = SP.random.randn(n_dimensions + 1)
lik_params = SP.random.randn(1)

Xs = SP.linspace(X.min() - 3, X.max() + 3)[:, SP.newaxis]

t0 = time.time()
#pygp: OLD
covar_ = se.SqexpCFARD(n_dimensions)
ll_ = lik.GaussLikISO()
hyperparams_ = {'covar': covar_params, 'lik': lik_params}
gp_ = GP.GP(covar_, likelihood=ll_, x=X, y=y)
lml_ = gp_.LML(hyperparams_)
dlml_ = gp_.LMLgrad(hyperparams_)
#optimize using pygp:
opt_params_ = opt.opt_hyper(gp_, hyperparams_)[0]
lmlo_ = gp_.LML(opt_params_)

#GPMIX:
cov = SP.ones([y.shape[0], 2])
cov[:, 1] = SP.randn(cov.shape[0])
covar = limix.CCovSqexpARD(n_dimensions)
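
gp_.LML above is the negative log marginal likelihood that opt.opt_hyper minimizes, and gp_.LMLgrad its gradient. Its closed form for a zero-mean GP, as a self-contained numpy sketch (noise already folded into K; this mirrors the formula, not pygp's code):

import numpy as np

def neg_log_marginal_likelihood(K, y):
    #-log p(y|X) = 0.5*y^T K^-1 y + 0.5*log|K| + (n/2)*log(2*pi)
    n = len(y)
    L = np.linalg.cholesky(K)
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))
    return (0.5 * y.dot(alpha)
            + np.log(np.diag(L)).sum()
            + 0.5 * n * np.log(2 * np.pi))

#example: SE kernel plus noise on random 1-D inputs
X = np.random.randn(30)
d = X.reshape(-1, 1) - X.reshape(1, -1)
K = np.exp(-0.5 * d**2) + 0.04 * np.eye(30)
y = np.random.multivariate_normal(np.zeros(30), K)
print(neg_log_marginal_likelihood(K, y))
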
Example #7
    #x2 = SP.concatenate((x2, x2_rep), axis=1)

    x = SP.concatenate((x1, x2), axis=0)
    y = SP.concatenate((C[1], T[1]), axis=0)
    
    #predictions:
    X = SP.linspace(2, x2.max(), 100)[:, SP.newaxis]
    X_g1 = SP.repeat(0, len(X)).reshape(-1, 1)
    X_g2 = SP.repeat(1, len(X)).reshape(-1, 1)

    #hyperparameters
    dim = 1
    group_indices = SP.concatenate([SP.repeat(i, len(xi)) for i, xi in enumerate((C[0].reshape(-1, 1),
                                                                                T[0].reshape(-1, 1)))])
    
    SECF = se.SqexpCFARD(dim)
    breakpointCF = breakpoint.DivergeCF()
    #noiseCF = noise.NoiseReplicateCF(replicate_indices)
    noiseCF = noise.NoiseCFISO()
    SECF_noise = combinators.SumCF((SECF, noiseCF))
    CovFun = combinators.ProductCF((SECF_noise, breakpointCF))

    covar_priors = []
    #scale
    covar_priors.append([lnpriors.lnGammaExp, [6, .3]])
    for i in range(dim):
        covar_priors.append([lnpriors.lnGammaExp, [30, .1]])
    #noise
    for i in range(1):
        covar_priors.append([lnpriors.lnGammaExp, [1, .3]])
    #breakpoint, no knowledge
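
combinators.ProductCF above multiplies covariance matrices elementwise, so the SE-plus-noise kernel is gated by the breakpoint covariance: correlation between two inputs survives only where the gate is nonzero. A numpy sketch of the product construction; the gate below is a hypothetical stand-in, not DivergeCF's actual form.

import numpy as np

def product_cf(K1, K2):
    #product of covariance functions = elementwise (Hadamard) product,
    #PSD by the Schur product theorem
    return K1 * K2

x = np.linspace(0, 10, 50)
d = x.reshape(-1, 1) - x.reshape(1, -1)
K_se = np.exp(-0.5 * d**2)
#hypothetical gate: shared covariance before a breakpoint at x=5, iid after
g = (x < 5).astype(float)
K_gate = np.outer(g, g) + np.diag(1.0 - g)
K = product_cf(K_se, K_gate)
print(np.linalg.eigvalsh(K).min() > -1e-10)  #True: still PSD
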
Example #8
    K1 = c1.K()
    K2 = c2.K(params, X, X)

    dK1 = c1.Kgrad_param(0)
    dK2 = c2.Kgrad_theta(params, X, 0)

    dKx1 = c1.Kgrad_X(0)
    dKx2 = c2.Kgrad_x(params, X, X, 0)

    dKx1diag = c1.Kdiag_grad_X(0)
    dKx2diag = c2.Kgrad_xdiag(params, X, 0)

if 0:
    c1 = limix.CCovSqexpARD()
    c2 = se.SqexpCFARD(n_dimensions=n_dimensions)

    params = SP.random.randn(n_dimensions + 1)
    #params[:] = 0
    c1.setX(X)
    c1.setParams(params)

    K1 = c1.K()
    K2 = c2.K(params, X, X)

    print SP.absolute(K1 - K2).max()

    dK1 = c1.Kgrad_param(0)
    dK2 = c2.Kgrad_theta(params, X, 0)
    dKx1 = c1.Kgrad_X(0)
    dKx2 = c2.Kgrad_x(params, X, X, 0)
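
The Kgrad_* calls above return analytic derivatives of the kernel matrix; a finite-difference check is the usual way to validate them. A generic numpy sketch (the kernel-as-a-function interface is mine, not the limix or pygp API; the [log amplitude, log length-scale] layout of SqexpCFARD is an assumption):

import numpy as np

def fd_kernel_grad(kfun, params, i, eps=1e-6):
    #central finite difference of the kernel matrix w.r.t. params[i]
    p_hi, p_lo = params.copy(), params.copy()
    p_hi[i] += eps
    p_lo[i] -= eps
    return (kfun(p_hi) - kfun(p_lo)) / (2 * eps)

X = np.random.randn(10, 1)

def k_se(params):
    #SE kernel in log parameters: amp = exp(params[0]), ls = exp(params[1])
    amp, ls = np.exp(params)
    d = (X - X.T) / ls
    return amp**2 * np.exp(-0.5 * d**2)

params = np.random.randn(2)
dK_analytic = 2 * k_se(params)  #dK/dlog(amp) = 2K for the SE kernel
dK_numeric = fd_kernel_grad(k_se, params, 0)
print(np.abs(dK_analytic - dK_numeric).max())  #~1e-9
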