Example #1
0
def run_demo():
    """Demo: fit a GP to 1-D toy data and plot the predictive distribution.

    Relies on module-level names imported elsewhere in this file:
    SP (scipy), LG (logging), PL (pylab), random, se, noise, combinators,
    lnpriors, lik, GP, opt, gpr_plot, and the sibling create_toy_data().
    Python 2 code (uses xrange).
    """
    LG.basicConfig(level=LG.INFO)
    # Fixed seed so the demo output is reproducible run-to-run.
    random.seed(1)

    #1. create toy data
    [x,y] = create_toy_data()
    n_dimensions = 1
    
    #2. locations of equally spaced prediction inputs, shaped (100, 1)
    X = SP.linspace(0,10,100)[:,SP.newaxis]
        

    if 0:
        # Dead branch, kept for reference: old interface where the
        # covariance function and the likelihood were fused into one SumCF.
        # hyperparameters (log-space: [amplitude, length scale, noise])
        covar_parms = SP.log([1,1,1])
        hyperparams = {'covar':covar_parms}       
        #construct covariance function
        SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
        noiseCF = noise.NoiseCFISO()
        covar = combinators.SumCF((SECF,noiseCF))
        covar_priors = []
        # amplitude ("scale") prior
        covar_priors.append([lnpriors.lnGammaExp,[1,2]])
        # one length-scale prior per input dimension
        covar_priors.extend([[lnpriors.lnGammaExp,[1,1]] for i in xrange(n_dimensions)])
        # noise prior
        covar_priors.append([lnpriors.lnGammaExp,[1,1]])
        priors = {'covar':covar_priors}
        likelihood = None

    if 1:
        # New interface: likelihood parameters decoupled from the covariance.
        likelihood = lik.GaussLikISO()
        # log-space hyperparameters: covar=[amplitude, length scale], lik=[noise]
        covar_parms = SP.log([1,1])
        hyperparams = {'covar':covar_parms,'lik':SP.log([1])}       
        #construct covariance function
        SECF = se.SqexpCFARD(n_dimensions=n_dimensions)
        covar = SECF
        covar_priors = []
        # amplitude ("scale") prior
        covar_priors.append([lnpriors.lnGammaExp,[1,2]])
        # one length-scale prior per input dimension
        covar_priors.extend([[lnpriors.lnGammaExp,[1,1]] for i in xrange(n_dimensions)])
        lik_priors = []
        # observation-noise prior
        lik_priors.append([lnpriors.lnGammaExp,[1,1]])
        priors = {'covar':covar_priors,'lik':lik_priors}

        

    
    gp = GP(covar,likelihood=likelihood,x=x,y=y)
    # Optimise hyperparameters under the priors; [0] keeps the optimum only.
    opt_model_params = opt.opt_hyper(gp,hyperparams,priors=priors,gradcheck=False)[0]
    
    # Predictive mean M and variance S at the test inputs.
    [M,S] = gp.predict(opt_model_params,X)

    # Plot mean +/- sqrt(variance) "sausage" and the raw training data.
    gpr_plot.plot_sausage(X,M,SP.sqrt(S))
    gpr_plot.plot_training_data(x,y)
    PL.show()
Example #2
0
def plot_results(twosample_object,
                 xlabel="input", ylabel="ouput", title=None,
                 interval_indices=None, alpha=None, legend=True,
                 replicate_indices=None, shift=None, *args, **kwargs):
    """
    Plot the results of the last prediction into the current pylab figure.

    Draws one "sausage" (mean +/- sqrt(variance)) per predicted model plus
    the training data, and optionally a legend.

    **Parameters:**

    twosample_object : :py:class:`gptwosample.twosample`
        GPTwoSample object on which 'predict' was already called.
    interval_indices : optional boolean masks per model (defaults built
        from the object's data if None).
    alpha : optional significance level forwarded to plot_sausage
        (groups get ``1 - alpha``).
    replicate_indices : mapping of samples to replicates; computed here
        if None, assuming replicates are appended one after another.
    shift : optional per-replicate time shifts applied to the plots.

    **Differential Groups:**

    .. image:: ../images/plotGPTwoSampleDifferential.pdf
        :height: 8cm

    **Non-Differential Groups:**

    .. image:: ../images/plotGPTwoSampleSame.pdf
        :height: 8cm

    Returns:
        Proper rectangles for use in pylab.legend().
    """
    # Nothing to draw before predict() has filled the cache (Python 2 print).
    if twosample_object._predicted_mean_variance is None:
        print "Not yet predicted"
        return
    if interval_indices is None:
        # NOTE(review): common_id / individual_id are not defined in this
        # function -- presumably module-level ids from gptwosample; confirm.
        interval_indices = get_model_structure(
        common=SP.array(SP.zeros_like(twosample_object.get_data(common_id)[0]), dtype='bool'),
        individual=SP.array(SP.ones_like(twosample_object.get_data(individual_id, 0)[0]), dtype='bool'))
    
    if title is None:
        # Default title shows the log Bayes factor of the two hypotheses.
        title = r'Prediction result: $\log(p(\mathcal{H}_I)/p(\mathcal{H}_S)) = %.2f $' % (twosample_object.bayes_factor())

#        plparams = {'axes.labelsize': 20,
#            'text.fontsize': 20,
#            'legend.fontsize': 18,
#            'title.fontsize': 22,
#            'xtick.labelsize': 20,
#            'ytick.labelsize': 20,
#            'usetex': True }

    legend_plots = []
    legend_names = []

    # Only auto-compute replicate indices when the caller gave none.
    calc_replicate_indices = replicate_indices is None

    alpha_groups = alpha
    if alpha is not None:
        alpha_groups = 1 - alpha
    
    from matplotlib.cm import jet #@UnresolvedImport

    # One entry per predicted model; Python 2 dict iteration.
    for name, value in twosample_object._predicted_mean_variance.iteritems():
        mean = value['mean']
        var = SP.sqrt(value['var'])  # plotted as a standard deviation
        if len(mean.shape) > 1:
            # 2-D mean: one row per group -- plot each group separately.
            number_of_groups = mean.shape[0]
            first = True
            for i in range(number_of_groups):
                # Spread group colors over the lower half of the jet colormap.
                col_num = (i / (2. * number_of_groups))
                col = jet(col_num)#(i/number_of_groups,i/number_of_groups,.8)
                data = twosample_object.get_data(name, i)
                replicate_length = len(SP.unique(data[0]))
                number_of_replicates = len(data[0]) / replicate_length
                if calc_replicate_indices:
                    # Assume replicates are appended one after another
                    replicate_indices = SP.concatenate([SP.repeat(rep, replicate_length) for rep in range(number_of_replicates)])
                # Copy so slicing the caller's shift array never mutates it.
                shifti = deepcopy(shift)
                if shifti is not None:
                    # Select this group's slice of the per-replicate shifts.
                    shifti = shift[i * number_of_replicates:(i + 1) * number_of_replicates]
                    #import pdb;pdb.set_trace()
                    PLOT.plot_sausage(twosample_object._interpolation_interval_cache[name] - SP.mean(shifti), mean[i], var[i], format_fill={'alpha':0.2, 'facecolor':col}, format_line={'alpha':1, 'color':col, 'lw':3, 'ls':'--'}, alpha=alpha_groups)[0]
                else:
                    PLOT.plot_sausage(twosample_object._interpolation_interval_cache[name],
                                      mean[i], var[i],
                                      format_fill={'alpha':0.2, 'facecolor':col},
                                      format_line={'alpha':1, 'color':col, 'lw':3, 'ls':'--'}, alpha=alpha_groups)[0]
                PLOT.plot_training_data(
                        SP.array(data[0]), SP.array(data[1]),
                        format_data={'alpha':.8,
                                     'marker':'.',
                                     'linestyle':'--',
                                     'lw':1,
                                     'markersize':6,
                                     'color':col},
                        replicate_indices=replicate_indices,
                        shift=shifti, *args, **kwargs)
                # NOTE(review): `first` is never set to False (reset is
                # commented out below), so a legend entry is appended for
                # EVERY group iteration -- confirm this is intended.
                if(first):
                    legend_plots.append(PL.Rectangle((0, 0), 1, 1, alpha=.2, fill=True, facecolor=col))
                    legend_names.append("%s %i" % (name, i + 1))
                    #first=False
        else:
            # 1-D mean: single shared model, drawn in the top jet color.
            col = jet(1.)
            #data = twosample_object.get_data(name, interval_indices=interval_indices)   
            #PLOT.plot_training_data(
            #        data[0], data[1],
            #        format_data={'alpha':.2,
#                                 'marker':'.',
#                                 'linestyle':'',
#                                 'markersize':10,
#                                 'color':col})
            legend_names.append("%s" % (name))
            PLOT.plot_sausage(
                twosample_object._interpolation_interval_cache[name], mean, var,
                format_fill={'alpha':0.2, 'facecolor':col},
                format_line={'alpha':1, 'color':col, 'lw':3, 'ls':'--'}, alpha=alpha)[0]
            legend_plots.append(PL.Rectangle((0, 0), 1, 1, alpha=.2, fc=col, fill=True))
    if legend:
        # Two-column legend expanded along the bottom axis edge.
        PL.legend(legend_plots, legend_names,
                  bbox_to_anchor=(0., 0., 1., 0.), loc=3,
                  ncol=2,
                  mode="expand",
                  borderaxespad=0.,
                  fancybox=False, frameon=False)
    
    PL.xlabel(xlabel)
    PL.ylabel(ylabel)

    # Leave headroom for the (large-font) title.
    PL.subplots_adjust(top=.88)
    PL.title(title, fontsize=22)
    
    return legend_plots
Example #3
0
def run_demo():
    """Demo: GP regression with per-replicate time shifts on simulated data.

    Builds noisy sine observations for two groups of replicates, applies a
    known time shift per replicate, fits a GP whose covariance contains a
    ShiftCF term that should recover those shifts, and contrasts it with a
    plain squared-exponential GP in a two-panel pylab figure.

    Relies on module-level names imported elsewhere in this file:
    SP (scipy), LG (logging), PL (pylab), random, se, noise, combinators,
    lnpriors, GP, opt_hyper, gpr_plot. Python 2 code (uses xrange).
    """
    LG.basicConfig(level=LG.INFO)
    PL.figure()

    # Fixed seed so the demo output is reproducible run-to-run.
    random.seed(1)

    #0. generate Toy-Data; just samples from a superposition of a sin + linear trend
    n_replicates = 4
    xmin = 1
    xmax = 2.5 * SP.pi

    x1_time_steps = 10
    x2_time_steps = 20

    x1 = SP.zeros(x1_time_steps * n_replicates)
    x2 = SP.zeros(x2_time_steps * n_replicates)

    # Replicates are laid out back-to-back along the input axis.
    for i in xrange(n_replicates):
        x1[i * x1_time_steps:(i + 1) * x1_time_steps] = SP.linspace(
            xmin, xmax, x1_time_steps)
        x2[i * x2_time_steps:(i + 1) * x2_time_steps] = SP.linspace(
            xmin, xmax, x2_time_steps)

    C = 2  #offset
    #b = 0.5
    sigma1 = 0.15
    sigma2 = 0.15
    n_noises = 1

    # Linear-trend slope; 0 leaves a pure sine.
    b = 0

    y1 = b * x1 + C + 1 * SP.sin(x1)
    #    dy1 = b   +     1*SP.cos(x1)
    y1 += sigma1 * random.randn(y1.shape[0])
    y1 -= y1.mean()

    y2 = b * x2 + C + 1 * SP.sin(x2)
    #    dy2 = b   +     1*SP.cos(x2)
    y2 += sigma2 * random.randn(y2.shape[0])
    y2 -= y2.mean()

    # Apply a known, replicate-dependent time shift the model should recover.
    for i in xrange(n_replicates):
        x1[i * x1_time_steps:(i + 1) * x1_time_steps] += .7 + (i / 2.)
        x2[i * x2_time_steps:(i + 1) * x2_time_steps] -= .7 + (i / 2.)

    x1 = x1[:, SP.newaxis]
    x2 = x2[:, SP.newaxis]

    x = SP.concatenate((x1, x2), axis=0)
    y = SP.concatenate((y1, y2), axis=0)

    #predictions: equally spaced inputs widened beyond the training range
    X = SP.linspace(xmin - n_replicates, xmax + n_replicates,
                    100 * n_replicates)[:, SP.newaxis]

    # hyperparameters; replicate_indices maps each sample to its replicate id
    dim = 1
    replicate_indices = []
    for i, xi in enumerate((x1, x2)):
        for rep in SP.arange(i * n_replicates, (i + 1) * n_replicates):
            # Python 2 integer division: samples-per-replicate repeats of rep.
            replicate_indices.extend(SP.repeat(rep, len(xi) / n_replicates))
    replicate_indices = SP.array(replicate_indices)
    n_replicates = len(SP.unique(replicate_indices))

    # log-space hyperparameters: [amplitude, length scale, shifts..., noise]
    logthetaCOVAR = [1, 1]
    logthetaCOVAR.extend(SP.repeat(SP.exp(1), n_replicates))
    logthetaCOVAR.extend([sigma1])
    logthetaCOVAR = SP.log(logthetaCOVAR)  #,sigma2])
    hyperparams = {'covar': logthetaCOVAR}

    SECF = se.SqexpCFARD(dim)
    #noiseCF = noise.NoiseReplicateCF(replicate_indices)
    noiseCF = noise.NoiseCFISO()
    shiftCF = combinators.ShiftCF(SECF, replicate_indices)
    CovFun = combinators.SumCF((shiftCF, noiseCF))

    covar_priors = []
    # amplitude ("scale") prior
    covar_priors.append([lnpriors.lnGammaExp, [1, 2]])
    for i in range(dim):
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])
    # shift priors, one per replicate
    for i in range(n_replicates):
        covar_priors.append([lnpriors.lnGauss, [0, .5]])
    # noise prior(s)
    for i in range(n_noises):
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])

    covar_priors = SP.array(covar_priors)
    priors = {'covar': covar_priors}
    # All n_replicates + 3 covariance parameters are free to be optimised.
    Ifilter = {'covar': SP.ones(n_replicates + 3)}

    gpr = GP(CovFun, x=x, y=y)
    opt_model_params = opt_hyper(gpr,
                                 hyperparams,
                                 priors=priors,
                                 gradcheck=False,
                                 Ifilter=Ifilter)[0]

    # Predictive mean M and variance S at the test inputs.
    [M, S] = gpr.predict(opt_model_params, X)

    # Recovered shifts occupy positions [2, 2+n_replicates) of the covar vector.
    T = opt_model_params['covar'][2:2 + n_replicates]

    PL.subplot(212)
    gpr_plot.plot_sausage(X,
                          M,
                          SP.sqrt(S),
                          format_line=dict(alpha=1, color='g', lw=2, ls='-'))
    gpr_plot.plot_training_data(x,
                                y,
                                shift=T,
                                replicate_indices=replicate_indices,
                                draw_arrows=2)

    PL.suptitle("Example for GPTimeShift with simulated data", fontsize=23)

    PL.title("Regression including time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    # Remember the y-range so the second panel uses the same scale.
    ylim = PL.ylim()

    # Baseline: same data, no ShiftCF; reuse amplitude/length-scale/noise entries.
    gpr = GP(combinators.SumCF((SECF, noiseCF)), x=x, y=y)
    priors = {'covar': covar_priors[[0, 1, -1]]}
    hyperparams = {'covar': logthetaCOVAR[[0, 1, -1]]}
    opt_model_params = opt_hyper(gpr,
                                 hyperparams,
                                 priors=priors,
                                 gradcheck=False)[0]

    PL.subplot(211)
    # Predict with the baseline (no-shift) model.
    [M, S] = gpr.predict(opt_model_params, X)

    gpr_plot.plot_sausage(X,
                          M,
                          SP.sqrt(S),
                          format_line=dict(alpha=1, color='g', lw=2, ls='-'))
    gpr_plot.plot_training_data(x, y, replicate_indices=replicate_indices)

    PL.title("Regression without time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    PL.ylim(ylim)

    PL.subplots_adjust(left=.1,
                       bottom=.1,
                       right=.96,
                       top=.8,
                       wspace=.4,
                       hspace=.4)
    PL.show()
Example #4
0
def run_demo():
    """Gaussian-process regression demo on 1-D toy data.

    Fits GP hyperparameters under Gamma/exponential priors and plots the
    predictive mean +/- standard deviation together with the raw samples.

    Uses module-level names imported elsewhere in this file: SP (scipy),
    LG (logging), PL (pylab), random, se, lik, noise, combinators,
    lnpriors, GP, opt, gpr_plot, create_toy_data. Python 2 code.
    """
    LG.basicConfig(level=LG.INFO)
    random.seed(1)  # reproducible toy data

    # Toy observations and the input dimensionality of the problem.
    [x, y] = create_toy_data()
    input_dim = 1

    # Equally spaced test inputs, shaped (100, 1).
    X = SP.linspace(0, 10, 100)[:, SP.newaxis]

    if 0:
        # Legacy interface (never executed, kept for reference): the
        # covariance function and the likelihood fused into one SumCF.
        hyperparams = {'covar': SP.log([1, 1, 1])}
        se_cf = se.SqexpCFARD(n_dimensions=input_dim)
        covar = combinators.SumCF((se_cf, noise.NoiseCFISO()))
        prior_list = [[lnpriors.lnGammaExp, [1, 2]]]       # amplitude
        prior_list.extend([[lnpriors.lnGammaExp, [1, 1]]   # length scales
                           for _ in xrange(input_dim)])
        prior_list.append([lnpriors.lnGammaExp, [1, 1]])   # noise
        priors = {'covar': prior_list}
        likelihood = None

    if 1:
        # Current interface: likelihood parameters decoupled from covariance.
        likelihood = lik.GaussLikISO()
        hyperparams = {'covar': SP.log([1, 1]), 'lik': SP.log([1])}
        covar = se.SqexpCFARD(n_dimensions=input_dim)
        cov_prior_list = [[lnpriors.lnGammaExp, [1, 2]]]      # amplitude
        cov_prior_list.extend([[lnpriors.lnGammaExp, [1, 1]]  # length scales
                               for _ in xrange(input_dim)])
        noise_prior_list = [[lnpriors.lnGammaExp, [1, 1]]]    # observation noise
        priors = {'covar': cov_prior_list, 'lik': noise_prior_list}

    gp = GP(covar, likelihood=likelihood, x=x, y=y)
    # Optimise the hyperparameters; keep only the optimum ([0]).
    opt_model_params = opt.opt_hyper(
        gp, hyperparams, priors=priors, gradcheck=False)[0]

    # Posterior mean and variance at the test inputs.
    [M, S] = gp.predict(opt_model_params, X)

    # Predictive "sausage" (mean +/- sqrt(var)) plus the training samples.
    gpr_plot.plot_sausage(X, M, SP.sqrt(S))
    gpr_plot.plot_training_data(x, y)
    PL.show()
def run_demo():
    """Demo: GP regression with per-replicate time shifts on simulated data.

    Generates noisy sine observations for two groups of replicates, applies
    a known time shift per replicate, fits a GP whose covariance contains a
    ShiftCF term that should recover those shifts, and contrasts it with a
    plain squared-exponential GP in a two-panel pylab figure.

    Fix: the original body mixed hard tabs and spaces for indentation
    (rejected by ``python -tt`` and a TabError under Python 3); indentation
    is normalised to 4 spaces throughout. Behavior is unchanged.

    Relies on module-level names imported elsewhere in this file:
    SP (scipy), LG (logging), PL (pylab), random, se, noise, combinators,
    lnpriors, GP, opt_hyper, gpr_plot. Python 2 code (uses xrange).
    """
    LG.basicConfig(level=LG.INFO)
    PL.figure()

    random.seed(1)  # reproducible noise and data

    # 0. generate toy data: samples from a sine (+ optional linear trend)
    n_replicates = 4
    xmin = 1
    xmax = 2.5 * SP.pi

    x1_time_steps = 10
    x2_time_steps = 20

    x1 = SP.zeros(x1_time_steps * n_replicates)
    x2 = SP.zeros(x2_time_steps * n_replicates)

    # Replicates are laid out back-to-back along the input axis.
    for i in xrange(n_replicates):
        x1[i * x1_time_steps:(i + 1) * x1_time_steps] = SP.linspace(xmin, xmax, x1_time_steps)
        x2[i * x2_time_steps:(i + 1) * x2_time_steps] = SP.linspace(xmin, xmax, x2_time_steps)

    C = 2  # offset
    #b = 0.5
    sigma1 = 0.15
    sigma2 = 0.15
    n_noises = 1

    # Linear-trend slope; 0 leaves a pure sine.
    b = 0

    y1 = b * x1 + C + 1 * SP.sin(x1)
    #    dy1 = b   +     1*SP.cos(x1)
    y1 += sigma1 * random.randn(y1.shape[0])
    y1 -= y1.mean()

    y2 = b * x2 + C + 1 * SP.sin(x2)
    #    dy2 = b   +     1*SP.cos(x2)
    y2 += sigma2 * random.randn(y2.shape[0])
    y2 -= y2.mean()

    # Apply a known, replicate-dependent time shift the model should recover.
    for i in xrange(n_replicates):
        x1[i * x1_time_steps:(i + 1) * x1_time_steps] += .7 + (i / 2.)
        x2[i * x2_time_steps:(i + 1) * x2_time_steps] -= .7 + (i / 2.)

    x1 = x1[:, SP.newaxis]
    x2 = x2[:, SP.newaxis]

    x = SP.concatenate((x1, x2), axis=0)
    y = SP.concatenate((y1, y2), axis=0)

    # Prediction inputs, widened beyond the training range.
    X = SP.linspace(xmin - n_replicates, xmax + n_replicates, 100 * n_replicates)[:, SP.newaxis]

    # Hyperparameters; replicate_indices maps each sample to its replicate id.
    dim = 1
    replicate_indices = []
    for i, xi in enumerate((x1, x2)):
        for rep in SP.arange(i * n_replicates, (i + 1) * n_replicates):
            # // keeps integer semantics under both Python 2 and 3.
            replicate_indices.extend(SP.repeat(rep, len(xi) // n_replicates))
    replicate_indices = SP.array(replicate_indices)
    n_replicates = len(SP.unique(replicate_indices))

    # log-space hyperparameters: [amplitude, length scale, shifts..., noise]
    logthetaCOVAR = [1, 1]
    logthetaCOVAR.extend(SP.repeat(SP.exp(1), n_replicates))
    logthetaCOVAR.extend([sigma1])
    logthetaCOVAR = SP.log(logthetaCOVAR)  #,sigma2])
    hyperparams = {'covar': logthetaCOVAR}

    SECF = se.SqexpCFARD(dim)
    #noiseCF = noise.NoiseReplicateCF(replicate_indices)
    noiseCF = noise.NoiseCFISO()
    shiftCF = combinators.ShiftCF(SECF, replicate_indices)
    CovFun = combinators.SumCF((shiftCF, noiseCF))

    covar_priors = []
    # amplitude ("scale") prior
    covar_priors.append([lnpriors.lnGammaExp, [1, 2]])
    for i in range(dim):
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])
    # shift priors, one per replicate
    for i in range(n_replicates):
        covar_priors.append([lnpriors.lnGauss, [0, .5]])
    # noise prior(s)
    for i in range(n_noises):
        covar_priors.append([lnpriors.lnGammaExp, [1, 1]])

    covar_priors = SP.array(covar_priors)
    priors = {'covar': covar_priors}
    # All n_replicates + 3 covariance parameters are free to be optimised.
    Ifilter = {'covar': SP.ones(n_replicates + 3)}

    gpr = GP(CovFun, x=x, y=y)
    opt_model_params = opt_hyper(gpr, hyperparams, priors=priors,
                                 gradcheck=False, Ifilter=Ifilter)[0]

    # Predictive mean M and variance S at the test inputs.
    [M, S] = gpr.predict(opt_model_params, X)

    # Recovered shifts occupy positions [2, 2+n_replicates) of the covar vector.
    T = opt_model_params['covar'][2:2 + n_replicates]

    PL.subplot(212)
    gpr_plot.plot_sausage(X, M, SP.sqrt(S),
                          format_line=dict(alpha=1, color='g', lw=2, ls='-'))
    gpr_plot.plot_training_data(x, y, shift=T,
                                replicate_indices=replicate_indices,
                                draw_arrows=2)

    PL.suptitle("Example for GPTimeShift with simulated data", fontsize=23)

    PL.title("Regression including time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    # Remember the y-range so the second panel uses the same scale.
    ylim = PL.ylim()

    # Baseline: same data, no ShiftCF; reuse amplitude/length-scale/noise entries.
    gpr = GP(combinators.SumCF((SECF, noiseCF)), x=x, y=y)
    priors = {'covar': covar_priors[[0, 1, -1]]}
    hyperparams = {'covar': logthetaCOVAR[[0, 1, -1]]}
    opt_model_params = opt_hyper(gpr, hyperparams, priors=priors, gradcheck=False)[0]

    PL.subplot(211)
    # Predict with the baseline (no-shift) model.
    [M, S] = gpr.predict(opt_model_params, X)

    gpr_plot.plot_sausage(X, M, SP.sqrt(S),
                          format_line=dict(alpha=1, color='g', lw=2, ls='-'))
    gpr_plot.plot_training_data(x, y, replicate_indices=replicate_indices)

    PL.title("Regression without time shift")
    PL.xlabel("x")
    PL.ylabel("y")
    PL.ylim(ylim)

    PL.subplots_adjust(left=.1, bottom=.1,
                       right=.96, top=.8,
                       wspace=.4, hspace=.4)
    PL.show()
Example #6
0
    
#    import copy
#    _hyperparams = copy.deepcopy(opt_model_params)
    # _logtheta = SP.array([0,0,0,0],dtype='double')
    # _logtheta[:2] = _hyperparams['covar'][:2]
    # _logtheta[3] = _hyperparams['covar'][2]
    # _hyperparams['covar'] = _logtheta

    #[opt_model_params,opt_lml]=GPR.optHyper(gpr_BP,hyperparams,priors=priors,gradcheck=True,Ifilter=Ifilter)

    import pygp.plot.gpr_plot as gpr_plot
    first = True
    [M, S] = gpr_opt_hyper.predict(opt_model_params, X)
    gpr_plot.plot_sausage(X, M[0], SP.sqrt(S[0]))
    gpr_plot.plot_sausage(X, M[1], SP.sqrt(S[1]))
    gpr_plot.plot_training_data(x1, C[1], replicate_indices=x1_rep.reshape(-1))
    gpr_plot.plot_training_data(x2, T[1], replicate_indices=x2_rep.reshape(-1))
    
#    norm = PL.Normalize()

    break_lml = []
    plots = SP.int_(SP.sqrt(24) + 1)
    PL.figure()
    for i, BP in enumerate(x1[0,:]):
        #PL.subplot(plots,plots,i+1)
        _hyper = copy.deepcopy(opt_model_params)
        _logtheta = _hyper['covar']
        _logtheta = SP.concatenate((_logtheta, [BP, 10]))#SP.var(y[:,i])]))
        _hyper['covar'] = _logtheta

        
Example #7
0
    #    import copy
    #    _hyperparams = copy.deepcopy(opt_model_params)
    # _logtheta = SP.array([0,0,0,0],dtype='double')
    # _logtheta[:2] = _hyperparams['covar'][:2]
    # _logtheta[3] = _hyperparams['covar'][2]
    # _hyperparams['covar'] = _logtheta

    # [opt_model_params,opt_lml]=GPR.optHyper(gpr_BP,hyperparams,priors=priors,gradcheck=True,Ifilter=Ifilter)

    import pygp.plot.gpr_plot as gpr_plot

    first = True
    [M, S] = gpr_opt_hyper.predict(opt_model_params, X)
    gpr_plot.plot_sausage(X, M[0], SP.sqrt(S[0]))
    gpr_plot.plot_sausage(X, M[1], SP.sqrt(S[1]))
    gpr_plot.plot_training_data(x1, C[1], replicate_indices=x1_rep.reshape(-1))
    gpr_plot.plot_training_data(x2, T[1], replicate_indices=x2_rep.reshape(-1))

    #    norm = PL.Normalize()

    break_lml = []
    plots = SP.int_(SP.sqrt(24) + 1)
    PL.figure()
    for i, BP in enumerate(x1[0, :]):
        # PL.subplot(plots,plots,i+1)
        _hyper = copy.deepcopy(opt_model_params)
        _logtheta = _hyper["covar"]
        _logtheta = SP.concatenate((_logtheta, [BP, 10]))  # SP.var(y[:,i])]))
        _hyper["covar"] = _logtheta

        priors_BP[3] = [lnpriors.lnGauss, [BP, 3]]