Example #1
    def trainData():
        """
        Generate training data
        """
        qBound = [[-1, 1], [-1, 1]]
        x1_ = sampling.trainSample(sampleType='GQ',
                                   GQdistType='Unif',
                                   qInfo=qBound[0],
                                   nSamp=4)
        x2_ = sampling.trainSample(sampleType='GQ',
                                   GQdistType='Unif',
                                   qInfo=qBound[1],
                                   nSamp=4)
        xTrain = reshaper.vecs2grid([x1_.q, x2_.q])
        yTrain_mean = np.asarray([
            -0.0169906, -0.0191095, -0.0167435, -0.0172338, -0.0203195,
            -0.020089, -0.0184691, -0.0188843, -0.0164581, -0.0200013,
            -0.0186512, -0.0159343, -0.0185975, -0.0155899, -0.0178921,
            -0.018329
        ])
        yTrain_sdev = np.asarray([
            0.00131249, 0.00104324, 0.00085491, 0.00099751, 0.00094231,
            0.00102579, 0.0010804, 0.00089567, 0.00081245, 0.0011208,
            0.00110756, 0.00126673, 0.00108875, 0.00145115, 0.00098541,
            0.00130559
        ])

        return qBound, xTrain, yTrain_mean, yTrain_sdev
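
A minimal usage sketch for the helper above (hypothetical call site; it assumes trainData and the numpy/UQit objects it relies on are in scope). The 4x4 Gauss-quadrature grid gives 16 joint samples, matched one-to-one with the tabulated mean and standard-deviation values.

    qBound, xTrain, yTrain_mean, yTrain_sdev = trainData()
    print(xTrain.shape)       # expected (16, 2): tensor-product grid of 4x4 GQ nodes
    print(yTrain_mean.shape)  # expected (16,): one mean response per grid point
    print(yTrain_sdev.shape)  # expected (16,): one noise sdev per grid point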
Example #2
def fEx(p, sampleType, n, qInfo, fExName, distType):
    """
    Generate synthetic training data
    """
    #  (a) xTrain
    nSamp = n[0] * n[1]
    xi = []
    q = []
    qBound = []
    if sampleType[0] == 'LHS' and sampleType[1] == 'LHS':
        if distType == ['Unif'] * p:
            qBound = qInfo
            xi = sampling.LHS_sampling(nSamp, [[-1, 1]] * p)
            xTrain = np.zeros((nSamp, p))
            for i in range(p):
                xTrain[:, i] = pce.mapFromUnit(xi[:, i], qBound[i])
            fEx_ = analyticTestFuncs.fEx2D(xTrain[:, 0], xTrain[:, 1],
                                           fExName, 'comp')
        else:
            raise ValueError(
                "LHS works only when all q have 'Unif' distribution.")
    else:
        for i in range(p):
            samps = sampling.trainSample(sampleType=sampleType[i],
                                         GQdistType=distType[i],
                                         qInfo=qInfo[i],
                                         nSamp=n[i])
            q.append(samps.q)
        xTrain = reshaper.vecs2grid(q)
        fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fExName, 'tensorProd')
    return xTrain, fEx_
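
A sketch of how this generator might be called under its two sampling branches; the argument values are illustrative only.

    # LHS branch: all parameters must have 'Unif' distributions
    xTrain, fEx_ = fEx(p=2, sampleType=['LHS', 'LHS'], n=[8, 8],
                       qInfo=[[-1, 2], [0, 3]], fExName='type1',
                       distType=['Unif', 'Unif'])
    # Tensor-product branch: per-parameter samples, e.g. Gauss-quadrature nodes
    xTrain, fEx_ = fEx(p=2, sampleType=['GQ', 'GQ'], n=[6, 5],
                       qInfo=[[-1, 2], [0, 3]], fExName='type1',
                       distType=['Unif', 'Unif'])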
Example #3
def lagInt_Quads2Line_test():
    """
    Test lagInt_Quads2Line().
    The test samples of (q1,q2) are generated along a line q2=a*q1+b defined
    in the admissible space of q1-q2.
    The training samples are drawn in the usual way, covering the admissible space of q1-q2.
    """
    #----- SETTINGS --------------------------------------------------------------
    nNodes = [9, 9]  #number of training samples for q1, q2
    sampType = [
        'GLL',  #type of training samples for q1, q2
        'unifSpaced'
    ]
    qBound = [
        [-0.75, 1.5],  #admissible range of q1,q2 
        [-0.8, 2.5]
    ]  #Note that the line should be confined in this space
    lineDef = {
        'start':
        [1.4,
         2.3],  #coordinates of the line's starting point in the q1-q2 plane
        'end': [-0.7,
                -0.2],  #coordinates of the line's end point in the q1-q2 plane
        'noPtsLine': 100  #number of the test samples
    }
    #-----------------------------------------------------------------------------
    p = len(nNodes)
    # Generate the training samples
    qNodes = []
    for i in range(p):
        qNodes_ = sampling.trainSample(sampleType=sampType[i],
                                       qInfo=qBound[i],
                                       nSamp=nNodes[i])
        qNodes.append(qNodes_.q)
    # Evaluate the simulator at the training samples
    fNodes = analyticTestFuncs.fEx2D(qNodes[0], qNodes[1], 'type1',
                                     'tensorProd').val
    # Construct the Lagrange interpolation and evaluate it at the test points on the line
    qLine, fLine = lagInt_Quads2Line(fNodes, qNodes, lineDef)
    # Plots
    plt.figure(figsize=(8, 5))
    plt.plot(qLine[0],
             fLine,
             '-ob',
             mfc='none',
             label='Lagrange Interpolation')
    fLine_ex = analyticTestFuncs.fEx2D(qLine[0], qLine[1], 'type1',
                                       'comp').val  #exact response
    plt.plot(qLine[0], fLine_ex, '-xr', label='Exact Value')
    plt.xlabel(r'$q_1$', fontsize=16)
    plt.ylabel('Response', fontsize=14)
    plt.legend(loc='best')
    plt.grid(alpha=0.4)
    plt.show()
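
For reference, the line defined by lineDef above runs from (1.4, 2.3) to (-0.7, -0.2); in the slope-intercept form q2 = a*q1 + b quoted in the docstring,

    a = \frac{q_{2,\mathrm{end}} - q_{2,\mathrm{start}}}{q_{1,\mathrm{end}} - q_{1,\mathrm{start}}}
      = \frac{-0.2 - 2.3}{-0.7 - 1.4} \approx 1.19, \qquad
    b = q_{2,\mathrm{start}} - a\, q_{1,\mathrm{start}} \approx 0.63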
Example #4
def trainData(xInfo, n, noiseType, trainSampleType, distType, fType):
    """
    Create training data D={X,Y}
    """
    X_ = sampling.trainSample(sampleType=trainSampleType,
                              GQdistType=distType,
                              qInfo=xInfo,
                              nSamp=n)
    x = X_.q
    sdV = noiseGen(n, noiseType)
    y = fEx(x, fType, xInfo) + sdV * np.random.randn(n)
    return x, y, sdV
Example #5
    def moments(self, distType, qInfo):
        """
        Mean and variance of f(q) estimated by the Monte-Carlo approach        
        (These can be used as reference values instead of the analytical values)

        Args:
           `distType`: List of length 2
               The i-th value (string) specifies the distribution type of the i-th parameter 
           `qInfo`: List of length 2
               Information about the parameter range or distribution.
                 * If `q` is Gaussian ('Norm' or 'normRand') => qInfo=[mean,sdev]
                 * Otherwise, qInfo=[min(q),max(q)]=admissible range of q
        
        Attributes:
           `mean`: float
               Expected value of f(q) estimated by the Monte-Carlo method
           `var`: float
               Variance of f(q) estimated by the Monte-Carlo method
        """
        nMC = 100000  #number of MC samples
        print(
            '... Reference moments are calculated by the Monte-Carlo method with %d samples'
            % nMC)
        qMC = []
        p = len(distType)
        if p != 2:
            raise ValueError("distType should have length 2")
        for i in range(p):
            if distType[i] == 'Unif':
                sampleType_ = 'unifRand'
            elif distType[i] == 'Norm':
                sampleType_ = 'normRand'
            else:
                raise ValueError("Invalid distType for parameter %d" % i)
            samps = sampling.trainSample(sampleType=sampleType_,
                                         GQdistType=distType[i],
                                         qInfo=qInfo[i],
                                         nSamp=nMC)
            qMC.append(samps.q)
        self.fVal_mc = fEx2D(qMC[0], qMC[1], self.typ, 'comp').val
        self.mean = np.mean(self.fVal_mc)
        self.var = np.mean(self.fVal_mc**2.) - self.mean**2.
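
The last two lines are the plain Monte-Carlo estimators over the N = nMC samples:

    \hat{\mu} = \frac{1}{N} \sum_{i=1}^{N} f(q^{(i)}), \qquad
    \hat{\sigma}^2 = \frac{1}{N} \sum_{i=1}^{N} f(q^{(i)})^2 - \hat{\mu}^2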
Example #6
def lagInt_1d_test():
    """
    Test Lagrange interpolation over a 1D parameter space.
    """
    #----- SETTINGS -------------------
    nNodes = 15  #number of training samples (nodes)
    qBound = [-1, 3]  #range over which the samples are taken
    nTest = 100  #number of test points
    sampType = 'GLL'  #Type of samples, see trainSample class in sampling.py
    fType = 'type1'  #Type of model function used as simulator
    #----------------------------------
    # Create the training samples and evaluate the simulator at each sample
    samps_ = sampling.trainSample(sampleType=sampType,
                                  qInfo=qBound,
                                  nSamp=nNodes)
    qNodes = samps_.q
    fNodes = analyticTestFuncs.fEx1D(qNodes, fType, qBound).val
    # Generate the test samples
    qTestFull = np.linspace(qBound[0], qBound[1], nTest)
    qTest = np.linspace(min(qNodes), max(qNodes), nTest)
    # Construct the Lagrange interpolation and evaluate it at the test points
    fInterpTest = lagInt(fNodes=fNodes, qNodes=[qNodes], qTest=[qTest]).val
    # Plot
    fTestFull = analyticTestFuncs.fEx1D(qTestFull, fType, qBound).val
    plt.figure(figsize=(12, 7))
    plt.plot(qTestFull, fTestFull, '--r', lw=2, label='Exact f(q)')
    plt.plot(qTest,
             fInterpTest,
             '-b',
             lw=2,
             label='f(q) by Lagrange Interpolation')
    plt.plot(qNodes, fNodes, 'oc', markersize=8, label='Nodes')
    plt.legend(loc='best', fontsize=17)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.grid()
    plt.xlabel(r'$q$', fontsize=26)
    plt.ylabel(r'$f(q)$', fontsize=26)
    plt.show()
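
The quantity evaluated at the test points is the standard 1D Lagrange interpolant built on the n = nNodes training nodes (the behavior expected of lagInt here):

    \tilde{f}(q) = \sum_{k=1}^{n} f(q_k)\, L_k(q), \qquad
    L_k(q) = \prod_{j=1,\, j \neq k}^{n} \frac{q - q_j}{q_k - q_j}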
Example #7
def lagInt_2d_test():
    """
    Test Lagrange interpolation over a 2D parameter space.
    """
    #----- SETTINGS --------------------------------------------------------------
    nNodes = [5, 4]  #number of training sample nodes for parameters q1, q2
    sampType = [
        'GLL',  #Method of drawing samples for q1, q2
        'unifSpaced'
    ]
    qBound = [
        [-0.75, 1.5],  # admissible range of parameters
        [-0.5, 2.5]
    ]

    # Settings of the exact response surface
    domRange = [
        [-2, 2],  #domain range for q1, q2
        [-3, 3]
    ]
    nTest = [100, 101]  #number of test samples
    #-----------------------------------------------------------------------------
    p = len(nNodes)
    # Create the training samples over each parameter space
    qNodes = []
    for i in range(p):
        qNodes_ = sampling.trainSample(sampleType=sampType[i],
                                       qInfo=qBound[i],
                                       nSamp=nNodes[i])
        qNodes.append(qNodes_.q)
    # Evaluate the simulator at each joint sample
    fNodes = analyticTestFuncs.fEx2D(qNodes[0], qNodes[1], 'type1',
                                     'tensorProd').val
    # Generate the test samples
    qTestList = []
    for i in range(p):
        qTest_ = sampling.testSample(sampleType='unifSpaced',
                                     qBound=qBound[i],
                                     nSamp=nTest[i])
        qTestList.append(qTest_.q)
    # Construct the Lagrange interpolation and evaluate it at the test samples
    fTest = lagInt(fNodes=fNodes,
                   qNodes=qNodes,
                   qTest=qTestList,
                   liDict={
                       'testRule': 'tensorProd'
                   }).val
    # Evaluate the exact model response over domRange
    qTestFull = []
    for i in range(p):
        qTestFull_ = np.linspace(domRange[i][0], domRange[i][1], nTest[i])
        qTestFull.append(qTestFull_)
    fTestFull = analyticTestFuncs.fEx2D(qTestFull[0], qTestFull[1], 'type1',
                                        'tensorProd').val
    fTestFullGrid = fTestFull.reshape((nTest[0], nTest[1]), order='F').T
    fTestGrid = fTest.reshape((nTest[0], nTest[1]), order='F').T
    # Plots
    plt.figure(figsize=(16, 8))
    plt.subplot(1, 2, 1)
    ax = plt.gca()
    CS1 = plt.contour(qTestFull[0], qTestFull[1], fTestFullGrid, 35)
    plt.clabel(CS1,
               inline=True,
               fontsize=15,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    qNodesGrid = reshaper.vecs2grid(qNodes)
    plt.plot(qNodesGrid[:, 0], qNodesGrid[:, 1], 'o', color='r', markersize=6)
    plt.xlabel(r'$q_1$', fontsize=25)
    plt.ylabel(r'$q_2$', fontsize=25)
    plt.xticks(fontsize=17)
    plt.yticks(fontsize=17)
    plt.title('Exact Response Surface')
    plt.subplot(1, 2, 2)
    ax = plt.gca()
    CS2 = plt.contour(qTestList[0], qTestList[1], fTestGrid, 20)
    plt.clabel(CS2,
               inline=True,
               fontsize=15,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.plot(qNodesGrid[:, 0], qNodesGrid[:, 1], 'o', color='r', markersize=6)
    plt.xlabel(r'$q_1$', fontsize=25)
    plt.ylabel(r'$q_2$', fontsize=25)
    plt.xticks(fontsize=17)
    plt.yticks(fontsize=17)
    plt.title('Response Surface by Lagrange Interpolation')
    plt.xlim(domRange[0])
    plt.ylim(domRange[1])
    plt.show()
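
With 'testRule': 'tensorProd', the 2D interpolant is expected to be the tensor product of the 1D Lagrange bases:

    \tilde{f}(q_1, q_2) = \sum_{i=1}^{n_1} \sum_{j=1}^{n_2} f(q_{1,i}, q_{2,j})\, L_i(q_1)\, L_j(q_2)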
Example #8
def lagInt_3d_test():
    """
    Test Lagrange interpolation over a 3D parameter space.
    """
    #----- SETTINGS -------------------
    nNodes = [8, 7, 6]  #number of training samples for q1, q2, q3
    sampType = [
        'GLL',  #Type of samples for q1, q2, q3
        'unifSpaced',
        'Clenshaw'
    ]
    qBound = [
        [-0.75, 1.5],  #range of parameters q1, q2, q3
        [-0.5, 2.5],
        [1, 3]
    ]
    nTest = [10, 11, 12]  #number of test samples for q1, q2, q3
    fOpts = {'a': 7, 'b': 0.1}  #parameters in Ishigami function
    #----------------------------------
    p = len(nNodes)
    # Generate the training samples
    qNodes = []
    for i in range(p):
        qNodes_ = sampling.trainSample(sampleType=sampType[i],
                                       qInfo=qBound[i],
                                       nSamp=nNodes[i])
        qNodes.append(qNodes_.q)
    # Run the simulator at the training samples
    fNodes = analyticTestFuncs.fEx3D(qNodes[0], qNodes[1], qNodes[2],
                                     'Ishigami', 'tensorProd', fOpts).val
    # Create the test samples and run the simulator at them
    qTest = []
    for i in range(p):
        qTest_ = sampling.testSample(sampleType='unifSpaced',
                                     qBound=qBound[i],
                                     nSamp=nTest[i])
        qTest.append(qTest_.q)
    fTestEx = analyticTestFuncs.fEx3D(qTest[0], qTest[1], qTest[2], 'Ishigami',
                                      'tensorProd', fOpts).val
    # Construct the Lagrange interpolation and evaluate it at the test samples
    fInterp = lagInt(fNodes=fNodes,
                     qNodes=qNodes,
                     qTest=qTest,
                     liDict={
                         'testRule': 'tensorProd'
                     }).val
    # Plot
    plt.figure(figsize=(14, 8))
    plt.subplot(2, 1, 1)
    fInterp_ = fInterp.reshape(np.prod(nTest), order='F')
    plt.plot(fInterp_, '-ob', mfc='none', label='Lagrange Interpolation')
    plt.plot(fTestEx, '--xr', ms=5, label='Exact Value')
    plt.ylabel(r'$f(q_1,q_2,q_3)$', fontsize=18)
    plt.xlabel(r'Test Sample Number', fontsize=14)
    plt.legend(loc='best', fontsize=14)
    plt.grid(alpha=0.4)
    plt.subplot(2, 1, 2)
    plt.plot(abs(fInterp_ - fTestEx), '-sk')
    plt.ylabel(r'$|f_{Interp}(q)-f_{Exact}(q)|$', fontsize=15)
    plt.xlabel(r'Test Sample Number', fontsize=14)
    plt.grid(alpha=0.4)
    plt.show()
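
The simulator here is the standard Ishigami benchmark, with a and b taken from fOpts:

    f(q_1, q_2, q_3) = \sin q_1 + a \sin^2 q_2 + b\, q_3^4 \sin q_1, \qquad a = 7, \; b = 0.1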
Example #9
def lagIntAtGQs_2d_test():
    """
    Test lagIntAtGQs(...) for a 2D uncertain parameter space
    """
    #------ SETTINGS ----------------------------------------------------
    #Space 1
    nSamp1 = [6, 10]  #number of samples in PCE1, parameter 1,2
    space1 = [
        [-2, 1.5],  #admissible space of PCE1 (both parameters)
        [-3, 2.5]
    ]
    sampleType1 = ['GLL', 'unifRand']  #see trainSample class in sampling.py
    #Space 2
    nSamp2 = [4, 5]  #number of samples in PCE2, parameter 1,2
    space2 = [
        [-0.5, 1],  #admissible space of PCE2 (both parameters)
        [-2., 1.5]
    ]
    #Test samples
    nTest = [100, 101]  #number of test samples of parameter 1,2
    #model function
    fType = 'type1'  #Type of simulator
    #---------------------------------------------------------------------
    p = 2
    distType2 = ['Unif', 'Unif']
    #(1) Generate samples from space 1
    q1 = []
    for i in range(p):
        q1_ = sampling.trainSample(sampleType=sampleType1[i],
                                   qInfo=space1[i],
                                   nSamp=nSamp1[i])
        space1[i] = [
            min(q1_.q), max(q1_.q)
        ]  #correction for uniform samples (otherwise contours are not plotted properly)
        q1.append(q1_.q)
    #Response values at the GL points
    fVal1 = analyticTestFuncs.fEx2D(q1[0], q1[1], fType, 'tensorProd').val
    #(2) Lagrange interpolation from samples 1 to GQ nodes on space 2
    q2, xi2, fVal2 = lagIntAtGQs(fVal1, q1, space1, nSamp2, space2, distType2)
    #(3) Construct a PCE on space 2
    pceDict = {
        'p': p,
        'sampleType': 'GQ',
        'pceSolveMethod': 'Projection',
        'truncMethod': 'TP',
        'distType': distType2
    }
    pce2 = pce(fVal=fVal2, xi=xi2, pceDict=pceDict, nQList=nSamp2)
    #(4) Evaluate the surrogates: Lagrange interpolation over space 1
    #                             PCE over space 2
    #test samples
    qTest1 = []
    xiTest2 = []
    qTest2 = []
    for i in range(p):
        testSamps1 = sampling.testSample('unifSpaced',
                                         qBound=space1[i],
                                         nSamp=nTest[i])
        qTest1.append(testSamps1.q)
        testSamps2 = sampling.testSample('unifSpaced',
                                         GQdistType=distType2[i],
                                         qBound=space2[i],
                                         nSamp=nTest[i])
        xiTest2.append(testSamps2.xi)
        qTest2.append(testSamps2.q)
    #evaluation
    #space 1
    fTest1_ex = analyticTestFuncs.fEx2D(qTest1[0], qTest1[1], fType,
                                        'tensorProd').val
    fTest1 = lagInt(fNodes=fVal1,
                    qNodes=q1,
                    qTest=qTest1,
                    liDict={
                        'testRule': 'tensorProd'
                    }).val
    #space 2
    pceEval2 = pceEval(coefs=pce2.coefs,
                       xi=xiTest2,
                       distType=distType2,
                       kSet=pce2.kSet)
    fTest2 = pceEval2.pceVal
    #(5) 2d contour plots
    plt.figure(figsize=(20, 8))
    plt.subplot(1, 3, 1)
    ax = plt.gca()
    fTest_Grid = fTest1_ex.reshape(nTest, order='F').T
    CS1 = plt.contour(qTest1[0], qTest1[1], fTest_Grid,
                      35)  #,cmap=plt.get_cmap('viridis'))
    plt.clabel(CS1,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.xlabel('q1')
    plt.ylabel('q2')
    plt.title('Exact response surface over space 1')
    #
    plt.subplot(1, 3, 2)
    ax = plt.gca()
    fTest1_Grid = fTest1.reshape(nTest, order='F').T
    CS2 = plt.contour(qTest1[0], qTest1[1], fTest1_Grid,
                      35)  #,cmap=plt.get_cmap('viridis'))
    plt.clabel(CS2,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    q1Grid = reshaper.vecs2grid(q1)
    plt.plot(q1Grid[:, 0], q1Grid[:, 1], 'ob', markersize=6)
    q2_ = reshaper.vecs2grid(q2)
    plt.plot(q2_[:, 0], q2_[:, 1], 'sr', markersize=6)
    plt.xlabel('q1')
    plt.ylabel('q2')
    plt.title(
        'Response surface by Lagrange Int.\n over space-1 using blue circles')
    #
    plt.subplot(1, 3, 3)
    ax = plt.gca()
    fTest2_Grid = fTest2.reshape(nTest, order='F').T
    CS3 = plt.contour(qTest2[0], qTest2[1], fTest2_Grid,
                      20)  #,cmap=plt.get_cmap('viridis'))
    plt.clabel(CS3,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.plot(q2_[:, 0], q2_[:, 1], 'sr', markersize=6)
    plt.xlabel('q1')
    plt.ylabel('q2')
    plt.title('Response surface by PCE over space-2 \n using red squares')
    plt.xlim(space1[0][:])
    plt.ylim(space1[1][:])
    plt.show()
Example #10
def lagIntAtGQs_1d_test():
    """
    lagIntAtGQs test for 1D parameter
    """
    #------ SETTINGS --------------------
    #space 1
    nSampMod1 = [7]  #number of samples in PCE1
    space1 = [[-0.5, 2.]]  #admissible space of param in PCE1
    sampleType1 = 'GLL'  #see trainSample class in sampling.py
    #space 2
    distType2 = ['Unif']  #distribution type of the RV
    nSampMod2 = [5]  #number of samples in PCE2
    space2 = [[0.0, 1.5]]  #admissible space of param in PCE2
    #model function
    fType = 'type1'  #Type of simulator
    #test samples
    nTest = 100  #number of test samples
    #------------------------------------
    #(1) Generate samples from space 1
    qInfo_ = space1[0]
    samps1 = sampling.trainSample(sampleType=sampleType1,
                                  qInfo=qInfo_,
                                  nSamp=nSampMod1[0])
    q1 = samps1.q
    xi1 = samps1.xi
    qBound1 = samps1.qBound
    #Evaluate the simulator at samples1
    fEx = analyticTestFuncs.fEx1D(q1, fType, qInfo_)
    fVal1 = fEx.val
    #(2) Lagrange interpolation from samples 1 to GQ nodes on space 2
    q2, xi2, fVal2 = lagIntAtGQs(fVal1, [q1], space1, nSampMod2, space2,
                                 distType2)
    #(3) Construct a PCE over space 2 using the GQ nodes
    pceDict = {
        'p': 1,
        'sampleType': 'GQ',
        'pceSolveMethod': 'Projection',
        'distType': distType2
    }
    pce2 = pce(fVal=fVal2, xi=xi2[:, None], pceDict=pceDict)
    #(4) Evaluate the surrogates: Lagrange interpolation over space 1
    #                             PCE over space 2
    testSamps1 = sampling.testSample(sampleType='unifSpaced',
                                     qBound=space1[0],
                                     nSamp=nTest)
    qTest1 = testSamps1.q
    fTest1_ex = analyticTestFuncs.fEx1D(qTest1, fType, space1).val
    fTest1 = lagInt(fNodes=fVal1, qNodes=[q1], qTest=[qTest1]).val
    #
    testSamps2 = sampling.testSample(sampleType='unifSpaced',
                                     qBound=space2[0],
                                     nSamp=nTest)
    qTest2 = testSamps2.q
    xiTest2 = testSamps2.xi
    pcePred_ = pceEval(coefs=pce2.coefs, xi=[xiTest2], distType=distType2)
    fTest2 = pcePred_.pceVal
    #(5) Plot
    plt.figure(figsize=(15, 8))
    plt.plot(qTest1, fTest1_ex, '--k', lw=2, label=r'Exact $f(q)$')
    plt.plot(q1,
             fVal1,
             'ob',
             markersize=8,
             label='Original samples over space1')
    plt.plot(qTest1, fTest1, '-b', lw=2, label='Lagrange Int. over space 1')
    plt.plot(q2, fVal2, 'sr', markersize=8, label='GQ samples over space2')
    plt.plot(qTest2, fTest2, '-r', lw=2, label='PCE over space 2')
    plt.xlabel(r'$q$', fontsize=26)
    plt.ylabel(r'$f(q)$', fontsize=26)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.grid(alpha=0.4)
    plt.legend(loc='best', fontsize=20)
    plt.show()
Example #11
    def ppce_cnstrct_1d(self):
        """
        Constructing a probabilistic PCE over a 1D parameter space
        """
        print('... Probabilistic PCE for 1D input parameter.')
        p = self.p
        ppceDict = self.ppceDict
        qTrain = self.qTrain
        yTrain = self.yTrain
        noiseSdev = self.noiseV
        # (0) Assignments
        nGQ = ppceDict['nGQtest']
        qInfo = ppceDict['qInfo']
        nMC = ppceDict['nMC']
        nw_ = int(nMC / 10)
        distType = ppceDict['distType']
        # Make a dict for GPR
        gprOpts = {
            'nIter': ppceDict['nIter_gpr'],
            'lr': ppceDict['lr_gpr'],
            'convPlot': ppceDict['convPlot_gpr']
        }
        standardizeYTrain_ = False
        if 'standardizeYTrain_gpr' in ppceDict.keys():
            gprOpts.update(
                {'standardizeYTrain': ppceDict['standardizeYTrain_gpr']})
            standardizeYTrain_ = True

        # (1) Generate test points that are Gauss quadratures chosen based on the
        # distribution of q (gPCE rule)
        sampsGQ = sampling.trainSample(sampleType='GQ',
                                       GQdistType=distType,
                                       qInfo=qInfo,
                                       nSamp=nGQ)
        qTest = sampsGQ.q

        # (2) Construct GPR surrogate based on training data
        gpr_ = gpr_torch.gpr(qTrain[:, None], yTrain[:, None], noiseSdev,
                             qTest[:, None], gprOpts)
        post_f = gpr_.post_f
        post_obs = gpr_.post_y
        shift_ = 0.0
        scale_ = 1.0
        if standardizeYTrain_:
            shift_ = gpr_.shift[0]  # 0: single-response
            scale_ = gpr_.scale[0]

        # (3) Use samples of GPR tested at GQ nodes to construct a PCE
        fMean_list = []
        fVar_list = []
        pceDict = {
            'p': p,
            'sampleType': 'GQ',
            'pceSolveMethod': 'Projection',
            'distType': [distType]
        }
        for j in range(nMC):
            # Draw a sample for f(q) from GPR surrogate
            f_ = post_obs.sample().numpy() * scale_ + shift_
            # Construct PCE for the drawn sample
            pce_ = pce(fVal=f_, xi=[], pceDict=pceDict, verbose=False)  # GP+TP
            fMean_list.append(pce_.fMean)
            fVar_list.append(pce_.fVar)
            if ((j + 1) % nw_ == 0):
                print(
                    "...... ppce repetition for finding samples of the PCE coefficients, iter = %d/%d"
                    % (j + 1, nMC))

        # (4) Outputs
        fMean_list = np.asarray(fMean_list)
        fVar_list = np.asarray(fVar_list)
        # Optional outputs: only used for gprPlot
        optOut = {'post_f': post_f, 'post_obs': post_obs, 'qTest': [qTest]}
        self.fMean_samps = fMean_list
        self.fVar_samps = fVar_list
        self.optOut = optOut
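
A minimal, hypothetical post-processing sketch for the attributes set above: the nMC samples of the PCE mean and variance can be summarized by their own sample statistics to quantify the surrogate-induced uncertainty (ppce_ denotes an instance of this class; the attribute names follow the assignments above).

    import numpy as np

    fMean_samps = ppce_.fMean_samps   # shape (nMC,): samples of E[f(q)]
    fVar_samps = ppce_.fVar_samps     # shape (nMC,): samples of V[f(q)]
    print('E[f(q)] = %g +/- %g' % (np.mean(fMean_samps), np.std(fMean_samps)))
    print('V[f(q)] = %g +/- %g' % (np.mean(fVar_samps), np.std(fVar_samps)))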
Example #12
    def ppce_cnstrct_pd(self):
        """
        Constructing a probabilistic PCE over a p-D parameter space, p>1
        """
        p = self.p
        print('... Probabilistic PCE for %d-D input parameter.' % p)
        ppceDict = self.ppceDict
        qTrain = self.qTrain
        yTrain = self.yTrain
        noiseSdev = self.noiseV
        # (0) Assignments
        nGQ = ppceDict['nGQtest']
        qInfo = ppceDict['qInfo']
        nMC = ppceDict['nMC']
        nw_ = int(nMC / 10)
        distType = ppceDict['distType']
        # Make a dict for gpr (do NOT change)
        gprOpts = {
            'nIter': ppceDict['nIter_gpr'],
            'lr': ppceDict['lr_gpr'],
            'convPlot': ppceDict['convPlot_gpr']
        }
        standardizeYTrain_ = False
        if 'standardizeYTrain_gpr' in ppceDict.keys():
            gprOpts.update(
                {'standardizeYTrain': ppceDict['standardizeYTrain_gpr']})
            standardizeYTrain_ = True

        # Make a dict for PCE (do NOT change)
        # Always use TP truncation with GQ sampling (hence Projection method)
        pceDict = {
            'p': p,
            'truncMethod': 'TP',
            'sampleType': 'GQ',
            'pceSolveMethod': 'Projection',
            'distType': distType
        }

        # (1) Generate test points that are Gauss quadratures chosen based on
        # the distribution of q (gPCE rule)
        qTestList = []
        for i in range(p):
            sampsGQ = sampling.trainSample(sampleType='GQ',
                                           GQdistType=distType[i],
                                           qInfo=qInfo[i],
                                           nSamp=nGQ[i])
            qTestList.append(sampsGQ.q)
        qTestGrid = reshaper.vecs2grid(qTestList)

        # (2) Construct GPR surrogate based on training data
        gpr_ = gpr_torch.gpr(qTrain, yTrain[:, None], noiseSdev, qTestGrid,
                             gprOpts)
        post_f = gpr_.post_f
        post_obs = gpr_.post_y
        shift_ = 0.0
        scale_ = 1.0
        if standardizeYTrain_:
            shift_ = gpr_.shift[0]  # 0: single-response
            scale_ = gpr_.scale[0]

        # optional: plot constructed response surface
        # gpr_torch.gprPlot().torch2d_3dSurf(qTrain,yTrain,qTestList,post_obs,shift=shift_,scale=scale_)

        # (3) Use samples of GPR tested at GQ nodes to construct a PCE
        fMean_list = []
        fVar_list = []
        for j in range(nMC):
            # Draw a sample for f(q) from GPR surrogate
            f_ = post_obs.sample().numpy() * scale_ + shift_
            # Construct PCE for the drawn sample
            pce_ = pce(fVal=f_,
                       nQList=nGQ,
                       xi=[],
                       pceDict=pceDict,
                       verbose=False)
            fMean_list.append(pce_.fMean)
            fVar_list.append(pce_.fVar)
            if ((j + 1) % nw_ == 0):
                print(
                    "...... ppce repetition for finding samples of the PCE coefficients, iter = %d/%d"
                    % (j + 1, nMC))

        # (4) Outputs
        fMean_list = np.asarray(fMean_list)
        fVar_list = np.asarray(fVar_list)
        # Optional outputs: only used for gprPlot
        optOut = {'post_f': post_f, 'post_obs': post_obs, 'qTest': qTestList}
        self.optOut = optOut
        self.fMean_samps = fMean_list
        self.fVar_samps = fVar_list
Example #13
def sobol_2par_unif_test():
    """
      Test for sobol when we have 2 uncertain parameters q1, q2.
      Sobol indices are computed for f(q1,q2)=q1**2+q1*q2, which is analyticTestFuncs.fEx2D('type3').
      Indices are computed from the following methods:
       * Method1: Direct computation by UQit
       * Method2: First a PCE is constructed and then its values are used to compute Sobol indices
       * Method3: Analytical expressions (reference values)
    """
    #--------------------------
    #------- SETTINGS
    n=[101, 100]       #number of samples for q1 and q2, Method1
    qBound=[[-3,1],   #admissible range of parameters
            [-1,2]]
    nQpce=[5,6]      #number of GQ points for Method2
    #--------------------------
    fType='type3'    #type of analytical function
    p=len(n)
    distType=['Unif']*p
    #(1) Samples from parameters space
    q=[]
    pdf=[]
    for i in range(p):
        q.append(np.linspace(qBound[i][0],qBound[i][1],n[i]))
        pdf.append(np.ones(n[i])/(qBound[i][1]-qBound[i][0]))
    #(2) Compute function value at the parameter samples
    fEx_=analyticTestFuncs.fEx2D(q[0],q[1],fType,'tensorProd')
    fEx=np.reshape(fEx_.val,n,'F')
    #(3) Compute Sobol indices by direct numerical integration
    sobol_=sobol(q,fEx,pdf)
    Si=sobol_.Si
    STi=sobol_.STi
    Sij=sobol_.Sij

    #(4) Construct a PCE and then use the predictions of that in numerical integration
    #for computing Sobol indices.
    #Generate observations at Gauss-Legendre points
    xi=[]
    qpce=[]
    for i in range(p):
        samps=sampling.trainSample(sampleType='GQ',GQdistType=distType[i],qInfo=qBound[i],nSamp=nQpce[i])
        xi.append(samps.xi)
        qpce.append(samps.q)
    fVal_pceCnstrct=analyticTestFuncs.fEx2D(qpce[0],qpce[1],fType,'tensorProd').val
    #Construct the PCE
    xiGrid=reshaper.vecs2grid(xi)
    pceDict={'p':2,'sampleType':'GQ','truncMethod':'TP','pceSolveMethod':'Projection',
             'distType':distType}
    pce_=pce(fVal=fVal_pceCnstrct,nQList=nQpce,xi=xiGrid,pceDict=pceDict)

    #Use the PCE to predict at test samples from parameter space
    qpceTest=[]
    xiTest=[]
    for i in range(p):
        testSamps=sampling.testSample('unifSpaced',GQdistType=distType[i],qBound=qBound[i],nSamp=n[i])
        xiTest.append(testSamps.xi)
        qpceTest.append(testSamps.q)
    fPCETest_=pceEval(coefs=pce_.coefs,kSet=pce_.kSet,xi=xiTest,distType=distType)
    fPCETest=fPCETest_.pceVal
    #compute Sobol indices
    sobolPCE_=sobol(qpceTest,fPCETest,pdf)
    Si_pce=sobolPCE_.Si
    Sij_pce=sobolPCE_.Sij

    #(5) Exact Sobol indices (analytical expressions)
    if fType=='type3':
       fEx_.sobol(qBound)
       Si_ex=fEx_.Si
       STi_ex=fEx_.STi
       Sij_ex=fEx_.Sij

    #(6) results
    print(' > Main Indices by UQit:\n\t S1=%g, S2=%g, S12=%g' %(Si[0],Si[1],Sij[0]))
    print(' > Main indices by gPCE+Numerical Integration:\n\t S1=%g, S2=%g, S12=%g' %(Si_pce[0],Si_pce[1],Sij_pce[0]))
    print(' > Main Analytical Reference:\n\t S1=%g, S2=%g, S12=%g' %(Si_ex[0],Si_ex[1],Sij_ex[0]))
    print(' > Total Indices by UQit:\n\t ST1=%g, ST2=%g' %(STi[0],STi[1]))
    print(' > Total Analytical Reference:\n\t ST1=%g, ST2=%g' %(STi_ex[0],STi_ex[1]))
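
For reference, the printed indices follow the usual variance-based (ANOVA) definitions, with V_i = V[E[f|q_i]]:

    S_i = \frac{V_i}{V[f]}, \qquad
    S_{ij} = \frac{V\big[E[f \mid q_i, q_j]\big] - V_i - V_j}{V[f]}, \qquad
    S_{T_i} = 1 - \frac{V\big[E[f \mid q_{\sim i}]\big]}{V[f]}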
Example #14
def pce_3d_test():
    """
    Test PCE for 3D uncertain parameter
    """
    #----- SETTINGS------------
    distType = ['Unif', 'Unif', 'Unif']  #distribution type of the parameters
    qInfo = [
        [-0.75, 1.5],  #range of parameters
        [-0.5, 2.5],
        [1.0, 3.0]
    ]
    nQ = [6, 5, 4]  #number of parameter samples in the 3 dimensions
    funOpt = {'a': 7, 'b': 0.1}  #parameters in the Ishigami function
    #PCE options
    truncMethod = 'TO'  #'TP'=Tensor Product
    #'TO'=Total Order
    sampleType = 'GQ'  #'GQ'=Gauss Quadrature nodes
    #other types: see trainSample in sampling.py
    pceSolveMethod = 'Regression'  #'Regression': for any combination of sample points and truncation methods
    #'Projection': only for 'GQ'+'TP'
    nTest = [5, 4, 3]  #number of test samples for the parameters
    if truncMethod == 'TO':
        LMax = 10  #max polynomial order in each parameter direction
    #--------------------
    p = len(distType)
    #Assemble the pceDict
    pceDict = {
        'p': p,
        'truncMethod': truncMethod,
        'sampleType': sampleType,
        'pceSolveMethod': pceSolveMethod,
        'distType': distType
    }
    if truncMethod == 'TO':
        pceDict.update({'LMax': LMax})
    #Generate training data
    xi = []
    q = []
    qBound = []
    for i in range(p):
        samps = sampling.trainSample(sampleType=sampleType,
                                     GQdistType=distType[i],
                                     qInfo=qInfo[i],
                                     nSamp=nQ[i])
        xi.append(samps.xi)
        q.append(samps.q)
        qBound.append(samps.qBound)
    fEx = analyticTestFuncs.fEx3D(q[0], q[1], q[2], 'Ishigami', 'tensorProd',
                                  funOpt)
    fVal = fEx.val
    #Construct the PCE
    xiGrid = reshaper.vecs2grid(xi)
    pce_ = pce(fVal=fVal, xi=xiGrid, pceDict=pceDict, nQList=nQ)
    fMean = pce_.fMean
    fVar = pce_.fVar
    pceCoefs = pce_.coefs
    kSet = pce_.kSet
    #Convergence of the PCE terms
    convPlot(coefs=pceCoefs, distType=distType, kSet=kSet)
    #Exact moments of the Ishigami function
    fEx.moments(qInfo=qBound)
    m = fEx.mean
    v = fEx.var
    #Compare the moments estimated by PCE with the exact analytical values
    print(writeUQ.printRepeated('-', 50))
    print('\t\t Exact \t\t PCE')
    print('E[f]:  ', m, fMean)
    print('V[f]:  ', v, fVar)
    print(writeUQ.printRepeated('-', 50))
    #Compare the PCE predictions at test points with the exact values of the model response
    qTest = []
    xiTest = []
    for i in range(p):
        testSamps = sampling.testSample('unifSpaced',
                                        GQdistType=distType[i],
                                        qInfo=qInfo[i],
                                        qBound=qBound[i],
                                        nSamp=nTest[i])
        qTest.append(testSamps.q)
        xiTest.append(testSamps.xi)
    fVal_test_ex = analyticTestFuncs.fEx3D(qTest[0], qTest[1], qTest[2],
                                           'Ishigami', 'tensorProd',
                                           funOpt).val
    #PCE prediction at test points
    pcePred_ = pceEval(coefs=pceCoefs, xi=xiTest, distType=distType, kSet=kSet)
    fVal_test_pce = pcePred_.pceVal
    #Plot the exact and PCE response values
    nTest_ = np.prod(np.asarray(nTest))
    fVal_test_pce_ = fVal_test_pce.reshape(nTest_, order='F')
    err = np.linalg.norm(fVal_test_pce_ - fVal_test_ex) / np.linalg.norm(fVal_test_ex)
    plt.figure(figsize=(10, 4))
    plt.plot(fVal_test_pce_, '-ob', mfc='none', ms=5, label='PCE')
    plt.plot(fVal_test_ex, '-xr', ms=5, label='Exact')
    plt.xlabel('Index of test samples, k')
    plt.ylabel('Model response')
    plt.legend(loc='best')
    plt.grid(alpha=0.4)
    plt.show()
    print('Relative error ||fPCE(q)-fEx(q)||/||fEx(q)|| (%) = ', err * 100)
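
The PCE moments compared above follow directly from the expansion coefficients; for a gPCE whose basis \Psi_k is orthogonal w.r.t. the PDF of \xi (with \Psi_0 = 1),

    f(q) \approx \sum_{k} c_k \Psi_k(\xi), \qquad
    E[f] = c_0, \qquad
    V[f] = \sum_{k \neq 0} c_k^2\, \langle \Psi_k, \Psi_k \rangle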
Example #15
def pce_1d_test():
    """
    Test PCE for 1D uncertain parameter 
    """
    #--- settings -------------------------
    #Parameter settings
    distType = 'Norm'  #distribution type of the parameter
    if distType == 'Unif':
        qInfo = [-2, 4.0]  #parameter range only if 'Unif'
        fType = 'type1'  #Type of test exact model function
    elif distType == 'Norm':
        qInfo = [.5, 0.9]  #[m,sdev] for 'Norm': q~N(m,sdev^2)
        fType = 'type2'  #Type of test exact model function
    n = 20  #number of training samples
    nTest = 200  #number of test samples in the parameter space
    #PCE Options
    sampleType = 'GQ'  #'GQ'=Gauss Quadrature nodes
    #''= any other sample => only 'Regression' can be selected
    # see `trainSample` class in sampling.py
    pceSolveMethod = 'Projection'  #'Regression': for any combination of sample points
    #'Projection': only for GQ
    LMax_ = 10  #number of terms (=K) in PCE (only used with the Regression method)
    #(LMax will be overwritten by the number of samples if 'GQ'+'Projection' is used)
    #-------------------------------------
    #(0) Make the pceDict
    pceDict = {
        'p': 1,
        'sampleType': sampleType,
        'pceSolveMethod': pceSolveMethod,
        'LMax': LMax_,
        'distType': [distType]
    }
    #(1) Generate training data
    samps = sampling.trainSample(sampleType=sampleType,
                                 GQdistType=distType,
                                 qInfo=qInfo,
                                 nSamp=n)
    q = samps.q
    xi = samps.xi
    qBound = samps.qBound
    fEx = analyticTestFuncs.fEx1D(q, fType, qInfo)
    f = fEx.val
    #(2) Compute the exact moments (as the reference data)
    fEx.moments(qInfo)
    fMean_ex = fEx.mean
    fVar_ex = fEx.var
    #(3) Construct the PCE
    pce_ = pce(fVal=f, xi=xi[:, None], pceDict=pceDict)
    fMean = pce_.fMean  #E[f(q)] estimated by PCE
    fVar = pce_.fVar  #V[f(q)] estimated by PCE
    pceCoefs = pce_.coefs  #Coefficients in the PCE
    #(4) Compare moments: exact vs. PCE estimations
    print(writeUQ.printRepeated('-', 70))
    print('-------------- Exact -------- PCE --------- Error % ')
    print('Mean of f(q) = %g\t%g\t%g' % (fMean_ex, fMean,
                                         (fMean - fMean_ex) / fMean_ex * 100.))
    print('Var  of f(q) = %g\t%g\t%g' % (fVar_ex, fVar,
                                         (fVar - fVar_ex) / fVar_ex * 100.))
    print(writeUQ.printRepeated('-', 70))
    #(5) Plots
    # Plot convergence of the PCE coefficients
    convPlot(coefs=pceCoefs, distType=distType)
    #
    #(6) Evaluate the PCE at test samples
    # Test samples
    testSamps = sampling.testSample('unifSpaced',
                                    GQdistType=distType,
                                    qInfo=qInfo,
                                    qBound=qBound,
                                    nSamp=nTest)
    qTest = testSamps.q
    xiTest = testSamps.xi
    fTest = analyticTestFuncs.fEx1D(qTest, fType,
                                    qInfo).val  #exact response at test samples
    #Prediction by PCE at test samples
    pcePred_ = pceEval(coefs=pceCoefs, xi=[xiTest], distType=[distType])
    fPCE = pcePred_.pceVal
    #Exact and PCE response surface
    plt.figure(figsize=(12, 5))
    ax = plt.gca()
    plt.plot(qTest, fTest, '-k', lw=2, label=r'Exact $f(q)$')
    plt.plot(q, f, 'ob', label=sampleType + ' Training Samples')
    plt.plot(qTest, fPCE, '-r', lw=2, label='PCE')
    plt.plot(qTest,
             fMean * np.ones(len(qTest)),
             '-b',
             label=r'$\mathbb{E}[f(q)]$')
    ax.fill_between(qTest,
                    fMean + 1.96 * mt.sqrt(fVar) * np.ones(len(qTest)),
                    fMean - 1.96 * mt.sqrt(fVar) * np.ones(len(qTest)),
                    color='powderblue',
                    alpha=0.4)
    plt.plot(qTest,
             fMean + 1.96 * mt.sqrt(fVar) * np.ones(len(qTest)),
             '--b',
             label=r'$\mathbb{E}[f(q)]\pm 95\%CI$')
    plt.plot(qTest, fMean - 1.96 * mt.sqrt(fVar) * np.ones(len(qTest)), '--b')
    plt.title('Example of 1D PCE for random variable of type %s' % distType)
    plt.xlabel(r'$q$', fontsize=19)
    plt.ylabel(r'$f(q)$', fontsize=19)
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.grid(alpha=0.3)
    plt.legend(loc='best', fontsize=17)
    plt.show()
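
The shaded band and dashed lines in the plot are the Gaussian 95% interval built from the PCE moments:

    E[f(q)] \pm 1.96\, \sqrt{V[f(q)]}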
Example #16
def pce_2d_test():
    """
    Test PCE for 2D uncertain parameter
    """
    #---- SETTINGS------------
    #Parameters specifications
    distType = ['Norm', 'Norm']  #distribution type of the parameters q1, q2
    qInfo = [
        [-2, 1],  #info on parameters
        [-2, 0.4]
    ]
    nQ = [7, 6]  #number of training samples of parameters
    nTest = [121, 120]  #number of test points per parameter for evaluating the PCE
    #PCE Options
    truncMethod = 'TO'  #'TP'=Tensor Product
    #'TO'=Total Order
    sampleType = [
        'GQ', 'GQ'
    ]  #'GQ'=Gauss Quadrature nodes ('Projection' or 'Regression')
    #For other type of samples, see sampling.py, trainSample => only 'Regression' can be used
    #'LHS': Latin Hypercube Sampling (only when all distType='Unif')
    fType = 'type1'  #Type of the exact model response, 'type1', 'type2', 'type3', 'Rosenbrock'
    pceSolveMethod = 'Regression'  #'Regression': for any combination of sampling and truncation methods
    #'Projection': only for 'GQ'+'TP'
    if truncMethod == 'TO':
        LMax = 8  #max polynomial order in each parameter dimension
    #------------------------
    p = len(distType)
    #Assemble the pceDict
    pceDict = {
        'p': p,
        'truncMethod': truncMethod,
        'sampleType': sampleType,
        'pceSolveMethod': pceSolveMethod,
        'distType': distType
    }
    if truncMethod == 'TO':
        pceDict.update({'LMax': LMax, 'pceSolveMethod': 'Regression'})
    #Generate the training data
    xi = []
    q = []
    qBound = []
    if sampleType[0] == 'LHS' and sampleType[1] == 'LHS':
        if distType == ['Unif'] * p:
            qBound = qInfo
            xi = sampling.LHS_sampling(nQ[0] * nQ[1], [[-1, 1]] * p)
            for i in range(p):
                q.append(pce.mapFromUnit(xi[:, i], qBound[i]))
            fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fType, 'comp')
            xiGrid = xi
        else:
            raise ValueError(
                "LHS works only when all q have 'Unif' distribution.")
    else:
        for i in range(p):
            samps = sampling.trainSample(sampleType=sampleType[i],
                                         GQdistType=distType[i],
                                         qInfo=qInfo[i],
                                         nSamp=nQ[i])
            q.append(samps.q)
            xi.append(samps.xi)
            qBound.append(samps.qBound)
        fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fType, 'tensorProd')
        xiGrid = reshaper.vecs2grid(xi)
    fVal = fEx_.val
    #Construct the PCE
    pce_ = pce(fVal=fVal, xi=xiGrid, pceDict=pceDict, nQList=nQ)
    fMean = pce_.fMean
    fVar = pce_.fVar
    pceCoefs = pce_.coefs
    kSet = pce_.kSet
    #Plot the convergence indicator of the PCE
    convPlot(coefs=pceCoefs, distType=distType, kSet=kSet)
    #Generate test samples for the parameters and evaluate the exact response surface at them
    qTest = []
    xiTest = []
    for i in range(p):
        testSamps = sampling.testSample('unifSpaced',
                                        GQdistType=distType[i],
                                        qInfo=qInfo[i],
                                        qBound=qBound[i],
                                        nSamp=nTest[i])
        qTest_ = testSamps.q
        xiTest_ = testSamps.xi
        qTest.append(qTest_)
        xiTest.append(xiTest_)
    fTest = analyticTestFuncs.fEx2D(qTest[0], qTest[1], fType,
                                    'tensorProd').val
    #Evaluate PCE at the test samples
    pcePred_ = pceEval(coefs=pceCoefs, xi=xiTest, distType=distType, kSet=kSet)
    fPCE = pcePred_.pceVal
    #Use MC method to directly estimate reference values for the mean and variance of f(q)
    fEx_.moments(distType, qInfo)
    fMean_mc = fEx_.mean
    fVar_mc = fEx_.var

    #Compare the PCE estimates for moments of f(q) with the reference values from MC
    print(writeUQ.printRepeated('-', 70))
    print('------------ MC -------- PCE --------- Error % ')
    print('Mean of f(q) = %g\t%g\t%g' % (fMean_mc, fMean,
                                         (fMean - fMean_mc) / fMean_mc * 100.))
    print('Var  of f(q) = %g\t%g\t%g' % (fVar_mc, fVar,
                                         (fVar - fVar_mc) / fVar_mc * 100.))
    print(writeUQ.printRepeated('-', 70))
    #Plot the exact and PCE response surfaces as contours in the parameters space
    # Create 2D grid from the test samples and plot the contours of response surface over it
    fTestGrid = fTest.reshape(nTest, order='F')
    fErrorGrid = (abs(fTestGrid - fPCE))
    # 2D grid from the sampled parameters
    if sampleType[0] == 'LHS' and sampleType[1] == 'LHS':
        qGrid = reshaper.vecsGlue(q[0], q[1])
    else:
        qGrid = reshaper.vecs2grid(q)
    plt.figure(figsize=(21, 8))
    plt.subplot(1, 3, 1)
    ax = plt.gca()
    CS1 = plt.contour(qTest[0], qTest[1], fTestGrid.T, 40)
    plt.clabel(CS1,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.plot(qGrid[:, 0], qGrid[:, 1], 'o', color='r', markersize=7)
    plt.xlabel(r'$q_1$')
    plt.ylabel(r'$q_2$')
    plt.title('Exact Response')
    plt.subplot(1, 3, 2)
    ax = plt.gca()
    CS2 = plt.contour(qTest[0], qTest[1], fPCE.T, 40)
    plt.clabel(CS2,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.plot(qGrid[:, 0], qGrid[:, 1], 'o', color='r', markersize=7)
    plt.xlabel(r'$q_1$')
    plt.ylabel(r'$q_2$')
    plt.title('PCE Response')
    plt.subplot(1, 3, 3)
    ax = plt.gca()
    CS3 = plt.contour(qTest[0], qTest[1], fErrorGrid.T, 40)
    plt.clabel(CS3,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.xlabel(r'$q_1$')
    plt.ylabel(r'$q_2$')
    plt.plot(qGrid[:, 0], qGrid[:, 1], 'o', color='r', markersize=7)
    plt.title('|Exact-Surrogate|')
    plt.show()