Code Example #1
File: gpr_tests.py Project: Jimmy-INL/UQit
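The snippets on this page use np, mt, torch, plt, and several UQit modules without showing the import block of gpr_tests.py. A minimal sketch of the imports they appear to need, assuming UQit's package layout (the exact module paths are an assumption, not part of the original listing):

import math as mt                  #mt.pi in the simulators
import numpy as np
import torch                       #torch.no_grad() in Code Example #4
import matplotlib.pyplot as plt
from UQit.gpr_torch import gpr, gprPost, gprPlot
import UQit.analyticTestFuncs as analyticTestFuncs
import UQit.sampling as sampling
import UQit.reshaper as reshaper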
def gprTorch_1d_singleTask_test():
    """
    Test for GPR for 1d parameter
    """
    def fEx(x):
        """
        Simulator
        """
        yEx=np.sin(2*mt.pi*x)
        return yEx

    def noiseGen(n,noiseType):
       """
       Generate a 1D numpy array of standard deviations of the observation noise
       """
       if noiseType=='homo':
          sd=0.2   #standard deviation (Note: non-zero, to avoid instabilities)
          sdV=[sd]*n
          sdV=np.asarray(sdV)
       elif noiseType=='hetero':
          sdMin=0.05
          sdMax=0.55
          sdV=sdMin+(sdMax-sdMin)*np.linspace(0.0,1.0,n)
       return sdV  

    def trainData(xBound,n,noiseType):
        """
        Create training data D={X,Y}
        """
        x=np.linspace(xBound[0],xBound[1],n)
        sdV=noiseGen(n,noiseType)
        y=fEx(x) + sdV * np.random.randn(n)
        return x,y,sdV
      
    #----- SETTINGS ----------------
    n=120           #number of training samples
    nTest=100       #number of test samples
    xBound=[0.,1]   #parameter range
    #Type of the noise
    noiseType='hetero'   #'homo'=homoscedastic, 'hetero'=heteroscedastic
    nIter_=800           #number of iterations in optimization of GPR hyperparameters
    lr_   =0.1           #learning rate in the optimization of the hyperparameters
    convPlot_=True       #plot convergence of optimization of the GPR hyperparameters
    #------------------------------------------------
    #(0) Assemble gprOpts dictionary
    gprOpts={'nIter':nIter_,'lr':lr_,'convPlot':convPlot_}
    #(1) Generate training and test samples
    xTrain,yTrain,noiseSdev=trainData(xBound,n,noiseType)
    xTest = np.linspace(xBound[0]-0.2, xBound[1]+.2, nTest) #if numpy is used for training
    #xTest = torch.linspace(xBound[0], xBound[1], nTest)   #if torch is used for training
    #(2) Construct the GPR using the training data
    gpr_=gpr(xTrain=xTrain[:,None],yTrain=yTrain[:,None],noiseV=noiseSdev,
             xTest=xTest[:,None],gprOpts=gprOpts)
    post_f=gpr_.post_f
    post_obs=gpr_.post_y
    #(3) Exact response surface
    fExTest=fEx(xTest)
    #(4) Plot
    pltOpts={'title':'Single-task GPR, 1D parameter, %s-scedastic noise'%noiseType}
    gprPlot(pltOpts).torch1d(post_f,post_obs,xTrain,yTrain,xTest,fExTest)
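For a quick numerical summary of the returned posteriors, gprPost can be applied on the test grid, exactly as Code Example #4 does in 2d. A minimal sketch for this 1d test (passing nTest as a one-element list is an assumption about gprPost's shape argument, not confirmed by the original listing):

fP_=gprPost(post_f,[nTest])       #[nTest]: assumed 1d shape argument
fP_.torchPost()
post_f_mean=fP_.mean              #posterior mean of f at the test points
lower_f,upper_f=fP_.ciL,fP_.ciU   #confidence bounds, as in Code Example #4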
Code Example #2
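Code Examples #2 and #3 additionally rely on the PPCE driver and a few utility modules. A sketch of the extra imports they appear to need (module paths again assumed from UQit's package layout):

from UQit.ppce import ppce
from UQit.pce import pce            #pce.mapFromUnit in Code Example #3
import UQit.gpr_torch as gpr_torch  #gpr_torch.gprPlot
import UQit.stats as statsUQit      #statsUQit.pdfFit_uniVar
import UQit.write as writeUQ        #writeUQ.printRepeated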
def ppce_1d_test():
    """
    Test PPCE over 1D parameter space
    """
    def fEx(x, fType, qInfo):
        """
        Simulator
        """
        yEx = analyticTestFuncs.fEx1D(x, fType, qInfo).val
        return yEx

    #
    def noiseGen(n, noiseType):
        """
        Generate a 1D numpy array of the standard deviation of the observation noise
        """
        if noiseType == 'homo':  #homoscedastic noise
            sd = 0.1  #(non-zero, to avoid instabilities)
            sdV = [sd] * n
            sdV = np.asarray(sdV)
        elif noiseType == 'hetero':  #heteroscedastic noise
            sdMin = 0.02
            sdMax = 0.2
            sdV = sdMin + (sdMax - sdMin) * np.linspace(0.0, 1.0, n)
        return sdV

    #
    def trainData(xInfo, n, noiseType, trainSampleType, distType, fType):
        """
        Create training data D={X,Y}
        """
        X_ = sampling.trainSample(sampleType=trainSampleType,
                                  GQdistType=distType,
                                  qInfo=xInfo,
                                  nSamp=n)
        x = X_.q
        sdV = noiseGen(n, noiseType)
        y = fEx(x, fType, xInfo) + sdV * np.random.randn(n)
        return x, y, sdV

    #
    #-------SETTINGS------------------------------
    distType = 'Norm'  #type of distribution of the parameter (according to the gPCE rule)
    trainSampleType = 'normRand'  #how to draw the training samples, see trainSample in sampling.py
    qInfo = [0.5, 0.9]  #info about the parameter
    #if 'Unif', qInfo =[min(q),max(q)]
    #if 'Norm', qInfo=[m,v] for q~N(m,v^2)
    n = 30  #number of training samples in GPR
    noiseType = 'homo'  #'homo'=homoscedastic, 'hetero'=heteroscedastic
    nGQtest = 10  #number of test points (=Gauss quadrature nodes)
    #GPR options
    nIter_gpr = 1000  #number of iterations in optimization of hyperparameters
    lr_gpr = 0.2  #learning rate for the optimization of the hyperparameters
    convPlot_gpr = True  #plot convergence of the optimization of the GPR hyperparameters
    nMC = 1000  #number of samples drawn from GPR surrogate to construct estimates
    #  for the moments of f(q)
    #---------------------------------------------
    if distType == 'Unif':
        fType = 'type1'
    elif distType == 'Norm':
        fType = 'type2'
    #(1) generate synthetic training data
    qTrain, yTrain, noiseSdev = trainData(qInfo, n, noiseType, trainSampleType,
                                          distType, fType)
    #(2) assemble the ppceDict dictionary
    ppceDict = {
        'nGQtest': nGQtest,
        'qInfo': qInfo,
        'distType': distType,
        'nIter_gpr': nIter_gpr,
        'lr_gpr': lr_gpr,
        'convPlot_gpr': convPlot_gpr,
        'nMC': nMC
    }
    #(3) construct the ppce
    ppce_ = ppce(qTrain, yTrain, noiseSdev, ppceDict)
    fMean_samples = ppce_.fMean_samps
    fVar_samples = ppce_.fVar_samps
    optOut = ppce_.optOut
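    #(note, inferred from usage) fMean_samps/fVar_samps hold nMC samples of the
    #estimated mean and variance of f(q), one per draw from the GPR surrogate;
    #optOut carries the GPR posteriors and test nodes used in the plots below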
    #(4) postprocess
    #   (a) plot the GPR surrogate along with response from the exact simulator
    pltOpts = {'title': 'PPCE, 1D param, %s-scedastic noise' % noiseType}
    gpr_torch.gprPlot(pltOpts).torch1d(optOut['post_f'], optOut['post_obs'],
                                       qTrain, yTrain, optOut['qTest'][0],
                                       fEx(optOut['qTest'][0], fType, qInfo))
    #   (b) plot histogram and pdf of the mean and variance distribution
    statsUQit.pdfFit_uniVar(fMean_samples, True, [])
    statsUQit.pdfFit_uniVar(fVar_samples, True, [])
    #   (c) compare the exact moments with estimated values by ppce
    fEx_ = analyticTestFuncs.fEx1D(qTrain, fType, qInfo)  #fEx_ avoids shadowing the nested fEx above
    fEx_.moments(qInfo)
    fMean_ex = fEx_.mean
    fVar_ex = fEx_.var

    fMean_mean = fMean_samples.mean()
    fMean_sdev = fMean_samples.std()
    fVar_mean = fVar_samples.mean()
    fVar_sdev = fVar_samples.std()
    print(writeUQ.printRepeated('-', 80))
    print('>> Exact mean(f) = %g' % fMean_ex)
    print('   ppce estimated: E[mean(f)] = %g , sdev[mean(f)] = %g' %
          (fMean_mean, fMean_sdev))
    print('>> Exact Var(f) = %g' % fVar_ex)
    print('   ppce estimated: E[Var(f)] = %g , sdev[Var(f)] = %g' %
          (fVar_mean, fVar_sdev))
Code Example #3
def ppce_2d_test():
    """
    Test for ppce for 2D parameter
    """
    def fEx(p, sampleType, n, qInfo, fExName):
        """
        Generate the training samples and evaluate the exact simulator at them
        """
        #  (a) xTrain
        nSamp = n[0] * n[1]
        xi = []
        q = []
        qBound = []
        if sampleType[0] == 'LHS' and sampleType[1] == 'LHS':
            if distType == ['Unif'] * p:
                qBound = qInfo
                xi = sampling.LHS_sampling(nSamp, [[-1, 1]] * p)
                xTrain = np.zeros((nSamp, p))
                for i in range(p):
                    xTrain[:, i] = pce.mapFromUnit(xi[:, i], qBound[i])
                fEx_ = analyticTestFuncs.fEx2D(xTrain[:, 0], xTrain[:, 1],
                                               fExName, 'comp')
            else:
                raise ValueError(
                    "LHS works only when all q have 'Unif' distribution.")
        else:
            for i in range(p):
                samps = sampling.trainSample(sampleType=sampleType[i],
                                             GQdistType=distType[i],
                                             qInfo=qInfo[i],
                                             nSamp=n[i])
                q.append(samps.q)
            xTrain = reshaper.vecs2grid(q)
            fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fExName, 'tensorProd')
        return xTrain, fEx_

    #
    def trainDataGen(p, sampleType, n, qInfo, fExName, noiseType):
        """
        Generate synthetic training data
        """
        #  (a) xTrain and noise-free yTrain
        xTrain, fEx_ = fEx(p, sampleType, n, qInfo, fExName)
        yTrain_noiseFree = fEx_.val
        nSamp = xTrain.shape[0]
        #  (b) set the sdev of the observation noise
        noiseSdev = noiseGen(nSamp, noiseType, xTrain, fExName)
        #  (c) Training data
        yTrain = yTrain_noiseFree + noiseSdev * np.random.randn(nSamp)
        return xTrain, yTrain, noiseSdev, yTrain_noiseFree, fEx_

    #
    def noiseGen(n, noiseType, xTrain, fExName):
        """
       Generate a 1D numpy array of standard deviation of the observation noise
       """
        if noiseType == 'homo':
            sd = 0.2  #(non-zero, to avoid instabilities)
            sdV = sd * np.ones(n)
        elif noiseType == 'hetero':
            sdV = 0.1 * (analyticTestFuncs.fEx2D(xTrain[:, 0], xTrain[:, 1],
                                                 fExName, 'comp').val + 0.001)
        return sdV

    #
    #----- SETTINGS -------------------------------------------
    #settings for parameters and data
    qInfo = [[-2, 2], [-2, 3]]  #info about the parameters
    #if 'Unif', qInfo =[min(q),max(q)]
    #if 'Norm', qInfo=[m,v] for q~N(m,v^2)
    distType = ['Unif', 'Unif']  #distribution type of parameters
    fExName = 'type1'  #name of simulator to generate synthetic data
    #see analyticTestFuncs.fEx2D()
    trainSampleType = ['LHS', 'LHS']  #sampling type, see trainSample in sampling.py
    n = [10, 12]  #number of training samples for each parameter.
    #note: n[0]*n[1]<128, due to GPyTorch
    noiseType = 'hetero'  #type of observation noise
    #'homo'=homoscedastic, 'hetero'=heteroscedastic
    #options for GPR
    nIter_gpr = 1000  #number of iterations in optimization of hyperparameters
    lr_gpr = 0.1  #learning rate for the optimization of the hyperparameters
    convPlot_gpr = True  #plot convergence of optimization of the GPR hyperparameters
    #options for Gauss quadrature test nodes
    nGQtest = [18, 18]  #number of test samples in each param dimension
    nMC = 100  #number of samples drawn from GPR surrogate to construct estimates
    # for the moments of f(q)
    #---------------------------------------------------------
    p = len(distType)
    #(1) generate synthetic training data
    qTrain, yTrain, noiseSdev, yTrain_noiseFree, fEx_ = trainDataGen(
        p, trainSampleType, n, qInfo, fExName, noiseType)
    #(2) probabilistic PCE
    ppceDict = {
        'nGQtest': nGQtest,
        'qInfo': qInfo,
        'distType': distType,
        'nIter_gpr': nIter_gpr,
        'lr_gpr': lr_gpr,
        'convPlot_gpr': convPlot_gpr,
        'nMC': nMC
    }
    ppce_ = ppce(qTrain, yTrain, noiseSdev, ppceDict)
    optOut = ppce_.optOut
    fMean_samples = ppce_.fMean_samps
    fVar_samples = ppce_.fVar_samps
    #(3) estimate the reference mean and variance of f(q) using a Monte Carlo approach
    fEx_.moments(distType, qInfo)
    fMean_mc = fEx_.mean
    fVar_mc = fEx_.var
    #(4) postprocess
    #   (a) plot the exact and GPR response surfaces
    gpr_torch.gprPlot().torch2d_3dSurf(qTrain, yTrain, optOut['qTest'],
                                       optOut['post_obs'])
    #   (b) plot histogram and fitted pdf of the mean and variance distributions
    statsUQit.pdfFit_uniVar(fMean_samples, True, [])
    statsUQit.pdfFit_uniVar(fVar_samples, True, [])
    #   (c) compare the reference moments with the estimated values by ppce
    fMean_mean = fMean_samples.mean()
    fMean_sdev = fMean_samples.std()
    fVar_mean = fVar_samples.mean()
    fVar_sdev = fVar_samples.std()
    print(writeUQ.printRepeated('-', 80))
    print('Reference mean(f) = %g' % fMean_mc)
    print('PPCE estimated: E[mean(f)] = %g , sdev[mean(f)] = %g' %
          (fMean_mean, fMean_sdev))
    print('Reference var(f) = %g' % fVar_mc)
    print('PPCE estimated: E[var(f)] = %g , sdev[var(f)] = %g' %
          (fVar_mean, fVar_sdev))
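Both 2d tests build their training and test grids with reshaper.vecs2grid, which tensorizes per-dimension sample vectors into a flat list of grid points. A minimal sketch of the behavior the snippets assume (the output shape is inferred from how xTrain is indexed above, not from UQit documentation):

q1 = np.linspace(-2., 2., 3)         #3 samples along q1
q2 = np.linspace(-2., 3., 2)         #2 samples along q2
grid = reshaper.vecs2grid([q1, q2])
#expected: grid has n[0]*n[1]=6 rows and p=2 columns, one row per
#tensor-product point, consistent with the xTrain[:, 0], xTrain[:, 1] usage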
Code Example #4
File: gpr_tests.py Project: Jimmy-INL/UQit
def gprTorch_2d_singleTask_test():
    """
    Test for GPR for 2d input
    """
    ##
    def plot_trainData(n,fSamples,noiseSdev,yTrain):
        """
        Plot the noisy training data which are used in GPR. 
        """
        plt.figure(figsize=(10,5))
        x_=np.zeros(n)
        for i in range(n):
            x_[i]=i+1
        for i in range(500):  #only for plotting possible noise realizations
            noise_=noiseSdev*np.random.randn(n)
            plt.plot(x_,fSamples+noise_,'.',color='steelblue',alpha=0.4,markersize=1)
        plt.errorbar(x_,fSamples,yerr=1.96*abs(noiseSdev),ls='none',capsize=5,ecolor='k',
                elinewidth=4,label=r'$95\%$ CI in Obs.')
        plt.plot(x_,fSamples,'o' ,markersize=6,markerfacecolor='lime',
                markeredgecolor='salmon',label='Mean Observation')
        plt.plot(x_,yTrain ,'xr' ,markersize=6,label='Sample Observation')
        plt.legend(loc='best',fontsize=15)
        plt.ylabel('QoI',fontsize=17)
        plt.xlabel('Simulation Index',fontsize=17)
        plt.xticks(fontsize=15)
        plt.yticks(fontsize=15)    
        plt.title('Training data with associated confidence')
        plt.show()
    ##
    def trainDataGen(p,sampleType,n,qBound,fExName,noiseType):
        """
        Generate Training Data
        """
        #  (a) xTrain 
        if sampleType=='grid': 
          nSamp=n[0]*n[1]
          gridList=[]
          for i in range(p):
              #grid_=torch.linspace(qBound[i][0],qBound[i][1],n[i])   #torch
              grid_=np.linspace(qBound[i][0],qBound[i][1],n[i])
              gridList.append(grid_)
          xTrain=reshaper.vecs2grid(gridList)
          #xTrain = gpytorch.utils.grid.create_data_from_grid(gridList)  #torch
        elif sampleType=='random':
          nSamp=n      #number of random samples
          xTrain=sampling.LHS_sampling(n,qBound)
        #  (b) Observation noise   
        #noiseSdev=torch.ones(nTot).mul(0.1)    #torch
        noiseSdev=noiseGen(nSamp,noiseType,xTrain,fExName)
        #yTrain = torch.sin(mt.pi*xTrain[:,0])*torch.cos(.25*mt.pi*xTrain[:,1])+
        #         torch.randn_like(xTrain[:,0]).mul(0.1)   #torch
        #  (c) Training response
        yTrain_noiseFree=analyticTestFuncs.fEx2D(xTrain[:,0],xTrain[:,1],fExName,'comp').val
        yTrain=yTrain_noiseFree+noiseSdev*np.random.randn(nSamp)
        return xTrain,yTrain,noiseSdev,yTrain_noiseFree
    ##    
    def noiseGen(n,noiseType,xTrain,fExName):
       """
       Generate a 1D numpy array of standard deviations of the observation noise
       """
       if noiseType=='homo':
          sd=0.2   # noise standard deviation  (Note: non-zero, to avoid instabilities)
          sdV=sd*np.ones(n)
       elif noiseType=='hetero':
          #sdMin=0.01
          #sdMax=0.5
          #sdV=sdMin+(sdMax-sdMin)*np.linspace(0.0,1.0,n)
          #sdV=0.15*np.ones(n)
          sdV=0.1*(analyticTestFuncs.fEx2D(xTrain[:,0],xTrain[:,1],fExName,'comp').val+0.001)
       return sdV
    #
    #----- SETTINGS
    qBound=[[-2,2],[-2,2]]   #Admissible range of parameters
    fExName='type1'          #Type of simulator in analyticTestFuncs.fEx2D
                             #'type1', 'type2', 'type3', 'Rosenbrock'
    sampleType='random'      #'random' or 'grid': type of training samples
    if sampleType=='grid':
       n=[9,9]               #number of training samples in each input dimension
    elif sampleType=='random':
       n=100                 #total number of training samples drawn randomly
    noiseType='hetero'       #noise type: 'homo'=homoscedastic, 'hetero'=heteroscedastic
    #options for GPR
    nIter_=1000        #number of iterations in optimization of GPR hyperparameters
    lr_   =0.05        #learning rate in the optimization of the hyperparameters
    convPlot_=True     #plot convergence of optimization of GPR hyperparameters
    nTest=[21,20]     #number of test points in each parameter dimension
    #------------------------------------------------
    #(0) Assemble the gprOpts dict
    gprOpts={'nIter':nIter_,'lr':lr_,'convPlot':convPlot_}
    #(1) Generate training data
    p=len(qBound)    #dimension of the input
    xTrain,yTrain,noiseSdev,yTrain_noiseFree=trainDataGen(p,sampleType,n,qBound,fExName,noiseType)
    nSamp=yTrain.shape[0]
    plot_trainData(nSamp,yTrain_noiseFree,noiseSdev,yTrain)
    #(2) Create the test samples
    xTestList=[]
    for i in range(p):
        #grid_=torch.linspace(qBound[i][0],qBound[i][1],20)    #torch
        grid_=np.linspace(qBound[i][0],qBound[i][1],nTest[i])
        xTestList.append(grid_)
    xTest=reshaper.vecs2grid(xTestList)
    #(3) Construct the GPR based on the training data and make predictions at the test samples
    gpr_=gpr(xTrain,yTrain[:,None],noiseSdev,xTest,gprOpts)
    post_f=gpr_.post_f
    post_obs=gpr_.post_y
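    #(note, inferred from usage) post_f is the posterior of the latent f(q),
    #while post_obs=gpr_.post_y is the posterior predictive of y=f(q)+noise;
    #Var[y*]=Var[f*]+noise variance, so the observation confidence bands
    #plotted below are the wider of the two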
    # Predicted mean and variance of the posteriors at the test grid    
    fP_=gprPost(post_f,nTest)
    fP_.torchPost()
    post_f_mean=fP_.mean
    post_f_sdev=fP_.sdev
    lower_f=fP_.ciL
    upper_f=fP_.ciU
    obsP_=gprPost(post_obs,nTest)
    obsP_.torchPost()
    post_obs_mean=obsP_.mean
    post_obs_sdev=obsP_.sdev
    lower_obs=obsP_.ciL
    upper_obs=obsP_.ciU
    # Plots
    with torch.no_grad():
        fig = plt.figure(figsize=(16,4))
        ax = fig.add_subplot(141)        
        fEx_test=analyticTestFuncs.fEx2D(xTest[:,0],xTest[:,1],fExName,'comp').val
        CS0=ax.contour(xTestList[0],xTestList[1],fEx_test.reshape((nTest[0],nTest[1]),order='F').T,levels=40)
        ax.clabel(CS0, inline=True, fontsize=15,colors='k',fmt='%0.2f',rightside_up=True,manual=False)
        ax.plot(xTrain[:,0],xTrain[:,1],'or')
        ax.set_title(r'Exact $f(q)$')
        ax = fig.add_subplot(142)
        CS1=ax.contour(xTestList[0],xTestList[1],(post_f_mean).T,levels=40)
        ax.clabel(CS1, inline=True, fontsize=15,colors='k',fmt='%0.2f',rightside_up=True,manual=False)
        ax.plot(xTrain[:,0],xTrain[:,1],'or')
        ax.set_title(r'Mean Posterior of $f(q)$')
        ax = fig.add_subplot(143)
        CS2=ax.contour(xTestList[0],xTestList[1],upper_obs.T,levels=40)
        ax.clabel(CS2, inline=True, fontsize=15,colors='k',fmt='%0.2f',rightside_up=True,manual=False)
        ax.plot(xTrain[:,0],xTrain[:,1],'or')
        ax.set_title(r'Upper Confidence for Observations')
        ax = fig.add_subplot(144)
        CS2=ax.contour(xTestList[0],xTestList[1],lower_obs.T,levels=40)
        ax.clabel(CS2, inline=True, fontsize=15,colors='k',fmt='%0.2f',rightside_up=True,manual=False)
        ax.plot(xTrain[:,0],xTrain[:,1],'or')
        ax.set_title(r'Lower Confidence for Observations')
        plt.show()
        #2dplot
        pltOpts={'title':'Mean posterior of f(q)',
                 'xlab':r'$q_1$',
                 'ylab':r'$q_2$'}
        gprPlot(pltOpts).torch2d_2dcont(xTrain,xTestList,post_f_mean) 
        #3d plot
        gprPlot().torch2d_3dSurf(xTrain,yTrain,xTestList,post_obs)
Code Example #5
def gprTorch_2d_singleTask_test2():
    """
    Test for GPR for 2d input - Importance of standardization of the training data
    Run the test with 'standardizeYTrain_' being True and False. 
    """

    ##
    def trainData():
        """
        Generate training data
        """
        qBound = [[-1, 1], [-1, 1]]
        x1_ = sampling.trainSample(sampleType='GQ',
                                   GQdistType='Unif',
                                   qInfo=qBound[0],
                                   nSamp=4)
        x2_ = sampling.trainSample(sampleType='GQ',
                                   GQdistType='Unif',
                                   qInfo=qBound[1],
                                   nSamp=4)
        xTrain = reshaper.vecs2grid([x1_.q, x2_.q])
        yTrain_mean = np.asarray([
            -0.0169906, -0.0191095, -0.0167435, -0.0172338, -0.0203195,
            -0.020089, -0.0184691, -0.0188843, -0.0164581, -0.0200013,
            -0.0186512, -0.0159343, -0.0185975, -0.0155899, -0.0178921,
            -0.018329
        ])
        yTrain_sdev = np.asarray([
            0.00131249, 0.00104324, 0.00085491, 0.00099751, 0.00094231,
            0.00102579, 0.0010804, 0.00089567, 0.00081245, 0.0011208,
            0.00110756, 0.00126673, 0.00108875, 0.00145115, 0.00098541,
            0.00130559
        ])

        return qBound, xTrain, yTrain_mean, yTrain_sdev

    #
    #----- SETTINGS
    #options for GPR
    nIter_ = 3000  #number of iterations in optimization of GPR hyperparameters
    lr_ = 0.05  #learning rate in the optimization of the hyperparameters
    convPlot_ = True  #plot convergence of optimization of GPR hyperparameters
    standardizeYTrain_ = True  #standardize the Y training data?
    nTest = [41, 40]  #number of test points in each parameter dimension
    #---------------------------------
    #(1) Generate training data
    qBound, xTrain, yTrain_mean, yTrain_sdev = trainData()
    p = len(qBound)  #dimension of the input
    nSamp = len(yTrain_mean)

    #(2) Generate noisy training data
    noise_ = np.random.randn(nSamp)
    noise_ = yTrain_sdev * noise_
    yTrain = yTrain_mean + noise_

    #(3) Create test points
    xTestList = []
    for i in range(p):
        grid_ = np.linspace(qBound[i][0], qBound[i][1], nTest[i])
        xTestList.append(grid_)
    xTest = reshaper.vecs2grid(xTestList)

    #(4) Fit the GPR
    gprOpts = {
        'nIter': nIter_,
        'lr': lr_,
        'convPlot': convPlot_,
        'standardizeYTrain': standardizeYTrain_
    }
    gpr_ = gpr(xTrain, yTrain[:, None], yTrain_sdev, xTest, gprOpts)
    post_f = gpr_.post_f
    post_obs = gpr_.post_y
    #(5) Predicted mean and variance of the posteriors at the test grid
    shift_ = 0.0  #default shift and scaling, assuming no standardization in the training data
    scale_ = 1.0
    if standardizeYTrain_:
        shift_ = gpr_.shift[0]  #0: single-response
        scale_ = gpr_.scale[0]

    fP_ = gprPost(post_f, nTest, shift=shift_, scale=scale_)
    fP_.torchPost()
    post_f_mean = fP_.mean
    post_f_sdev = fP_.sdev
    obsP_ = gprPost(post_obs, nTest, shift=shift_, scale=scale_)
    obsP_.torchPost()
    post_obs_mean = obsP_.mean
    post_obs_sdev = obsP_.sdev

    # (6) Plots
    # When using 'torch2d_2dcont', de-standardization of the predicted mean and sdev must be done manually
    pltOpts = {
        'title': 'Mean of posterior f(q)',
        'xlab': r'$q_1$',
        'ylab': r'$q_2$'
    }
    gprPlot(pltOpts).torch2d_2dcont(xTrain, xTestList,
                                    post_f_mean * scale_ + shift_)
    pltOpts = {
        'title': 'Sdev of posterior f(q)',
        'xlab': r'$q_1$',
        'ylab': r'$q_2$'
    }
    gprPlot(pltOpts).torch2d_2dcont(xTrain, xTestList, post_f_sdev * scale_)
    # When using torch2d_3dSurf, the optional arguments shift and scale can be passed.
    # Therefore, de-standardization of the predictions is automatic.
    pltOpts = {
        'title': 'Mean of posterior f(q)',
        'xlab': r'$q_1$',
        'ylab': r'$q_2$'
    }
    gprPlot(pltOpts).torch2d_3dSurf(xTrain,
                                    yTrain,
                                    xTestList,
                                    post_f,
                                    shift=shift_,
                                    scale=scale_)
    pltOpts = {
        'title': 'Mean of posterior predictive y=f(q)+e',
        'xlab': r'$q_1$',
        'ylab': r'$q_2$'
    }
    gprPlot(pltOpts).torch2d_3dSurf(xTrain,
                                    yTrain,
                                    xTestList,
                                    post_obs,
                                    shift=shift_,
                                    scale=scale_)
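The manual de-standardization passed to torch2d_2dcont above follows the usual affine convention. A minimal helper capturing the rule these calls assume (inferred from how shift_ and scale_ are applied in this snippet, not from UQit documentation):

def destandardize(mean_std, sdev_std, shift, scale):
    """
    Map standardized GPR predictions back to the original scale, assuming
    y was standardized as y_std=(y-shift)/scale; the shift cancels for the
    sdev since it is a spread, not a location.
    """
    return mean_std * scale + shift, sdev_std * scale

With it, the two torch2d_2dcont calls reduce to plotting the outputs of destandardize(post_f_mean, post_f_sdev, shift_, scale_).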