Example #1
    def trainDataGen(p,sampleType,n,qBound,fExName,noiseType):
        """
        Generate Training Data
        """
        #  (a) xTrain
        if sampleType=='grid':
            nSamp=n[0]*n[1]
            gridList=[]
            for i in range(p):
                #grid_=torch.linspace(qBound[i][0],qBound[i][1],n[i])   #torch
                grid_=np.linspace(qBound[i][0],qBound[i][1],n[i])
                gridList.append(grid_)
            xTrain=reshaper.vecs2grid(gridList)
            #xTrain = gpytorch.utils.grid.create_data_from_grid(gridList)  #torch
        elif sampleType=='random':
            nSamp=n     #number of random samples
            xTrain=sampling.LHS_sampling(n,qBound)
        #  (b) Observation noise   
        #noiseSdev=torch.ones(nTot).mul(0.1)    #torch
        noiseSdev=noiseGen(nSamp,noiseType,xTrain,fExName)
        #yTrain = torch.sin(mt.pi*xTrain[:,0])*torch.cos(.25*mt.pi*xTrain[:,1])+
        #         torch.randn_like(xTrain[:,0]).mul(0.1)   #torch
        #  (c) Training response
        yTrain=analyticTestFuncs.fEx2D(xTrain[:,0],xTrain[:,1],fExName,'comp').val
        yTrain_noiseFree=yTrain
        yTrain=yTrain_noiseFree+noiseSdev*np.random.randn(nSamp)
        return xTrain,yTrain,noiseSdev,yTrain_noiseFree
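Throughout these examples, `reshaper.vecs2grid` turns a list of 1D sample vectors into the full tensor-product grid. A minimal NumPy sketch of the assumed behavior (first parameter varying fastest, consistent with the `order='F'` reshapes used elsewhere in these examples):

import numpy as np

def vecs2grid_sketch(vecs):
    """Tensor-product grid from a list of 1D vectors: shape (n1*n2*...*np, p)."""
    grids = np.meshgrid(*vecs, indexing='ij')
    # Fortran-order flattening makes the first parameter vary fastest.
    return np.stack([g.ravel(order='F') for g in grids], axis=1)

x = vecs2grid_sketch([np.linspace(-2, 2, 3), np.linspace(0, 1, 2)])
print(x.shape)  # (6, 2)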
Example #2
 def fEx(p, sampleType, n, qInfo, fExName):
     """
     Generate synthetic training data
     """
     #  (a) xTrain
     nSamp = n[0] * n[1]
     xi = []
     q = []
     qBound = []
     if sampleType[0] == 'LHS' and sampleType[1] == 'LHS':
         # distType is assumed to be defined in the enclosing scope
         if distType == ['Unif'] * p:
             qBound = qInfo
             xi = sampling.LHS_sampling(nSamp, [[-1, 1]] * p)
             xTrain = np.zeros((nSamp, p))
             for i in range(p):
                 xTrain[:, i] = pce.mapFromUnit(xi[:, i], qBound[i])
             fEx_ = analyticTestFuncs.fEx2D(xTrain[:, 0], xTrain[:, 1],
                                            fExName, 'comp')
         else:
             raise ValueError(
                 "LHS works only when all q have 'Unif' distribution.")
     else:
         for i in range(p):
             samps = sampling.trainSample(sampleType=sampleType[i],
                                          GQdistType=distType[i],
                                          qInfo=qInfo[i],
                                          nSamp=n[i])
             q.append(samps.q)
         xTrain = reshaper.vecs2grid(q)
         fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fExName, 'tensorProd')
     return xTrain, fEx_
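`pce.mapFromUnit` is assumed to be the affine map from the standard interval [-1, 1] onto the admissible range `qBound[i]`; a self-contained sketch:

import numpy as np

def mapFromUnit_sketch(xi, qBound):
    """Linearly map samples xi in [-1, 1] to qBound=[qMin, qMax]."""
    qMin, qMax = qBound
    return qMin + (np.asarray(xi) + 1.0) * 0.5 * (qMax - qMin)

print(mapFromUnit_sketch([-1.0, 0.0, 1.0], [2.0, 6.0]))  # [2. 4. 6.]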
Example #3
    def trainData():
        """
        Generate training data
        """
        qBound = [[-1, 1], [-1, 1]]
        x1_ = sampling.trainSample(sampleType='GQ',
                                   GQdistType='Unif',
                                   qInfo=qBound[0],
                                   nSamp=4)
        x2_ = sampling.trainSample(sampleType='GQ',
                                   GQdistType='Unif',
                                   qInfo=qBound[1],
                                   nSamp=4)
        xTrain = reshaper.vecs2grid([x1_.q, x2_.q])
        yTrain_mean = np.asarray([
            -0.0169906, -0.0191095, -0.0167435, -0.0172338, -0.0203195,
            -0.020089, -0.0184691, -0.0188843, -0.0164581, -0.0200013,
            -0.0186512, -0.0159343, -0.0185975, -0.0155899, -0.0178921,
            -0.018329
        ])
        yTrain_sdev = np.asarray([
            0.00131249, 0.00104324, 0.00085491, 0.00099751, 0.00094231,
            0.00102579, 0.0010804, 0.00089567, 0.00081245, 0.0011208,
            0.00110756, 0.00126673, 0.00108875, 0.00145115, 0.00098541,
            0.00130559
        ])

        return qBound, xTrain, yTrain_mean, yTrain_sdev
Example #4
 def trainDataGen(p, sampleType, n, qBound, fExName, noiseType):
     """
     Generate Training Data
     """
     #  (a) xTrain
     if sampleType == 'grid':
         nSamp = n[0] * n[1]
         gridList = []
         for i in range(p):
             grid_ = np.linspace(qBound[i][0], qBound[i][1], n[i])
             gridList.append(grid_)
         xTrain = reshaper.vecs2grid(gridList)
     elif sampleType == 'random':
         nSamp = n  # number of random samples
         xTrain = sampling.LHS_sampling(n, qBound)
     #  (b) Observation noise
     noiseSdev = noiseGen(nSamp, noiseType, xTrain, fExName)
     #  (c) Training response
     yTrain = analyticTestFuncs.fEx2D(xTrain[:, 0], xTrain[:, 1], fExName,
                                      'comp').val
     yTrain_noiseFree = yTrain
     yTrain = yTrain_noiseFree + noiseSdev * np.random.randn(nSamp)
     return xTrain, yTrain, noiseSdev, yTrain_noiseFree
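For `sampleType=='random'`, `sampling.LHS_sampling(n, qBound)` provides `n` space-filling samples over the hyper-rectangle `qBound`. A minimal Latin Hypercube sketch of the assumed behavior (one stratified draw per dimension, shuffled independently):

import numpy as np

def lhs_sampling_sketch(n, qBound, rng=None):
    """n Latin Hypercube samples over qBound, a list of [min, max] per dimension."""
    rng = np.random.default_rng() if rng is None else rng
    x = np.empty((n, len(qBound)))
    for i, (lo, hi) in enumerate(qBound):
        u = (np.arange(n) + rng.random(n)) / n   # one uniform draw per stratum
        x[:, i] = lo + rng.permutation(u) * (hi - lo)
    return x

xTrain = lhs_sampling_sketch(100, [[-2, 2], [-2, 2]])
print(xTrain.shape)  # (100, 2)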
Example #5
def lagInt_2d_test():
    """
    Test Lagrange interpolation over a 2D parameter space.
    """
    #----- SETTINGS --------------------------------------------------------------
    nNodes = [
        5, 4
    ]  #number of training nodes in the space of parameters q1, q2
    sampType = [
        'GLL',  #Method of drawing samples for q1, q2
        'unifSpaced'
    ]
    qBound = [
        [-0.75, 1.5],  # admissible range of parameters
        [-0.5, 2.5]
    ]

    # Settings of the exact response surface
    domRange = [
        [-2, 2],  #domain range for q1, q2
        [-3, 3]
    ]
    nTest = [100, 101]  #number of test samples
    #-----------------------------------------------------------------------------
    p = len(nNodes)
    # Create the training samples over each parameter space
    qNodes = []
    for i in range(p):
        qNodes_ = sampling.trainSample(sampleType=sampType[i],
                                       qInfo=qBound[i],
                                       nSamp=nNodes[i])
        qNodes.append(qNodes_.q)
    # Evaluate the simulator at each joint sample
    fNodes = analyticTestFuncs.fEx2D(qNodes[0], qNodes[1], 'type1',
                                     'tensorProd').val
    # Generate the test samples
    qTestList = []
    for i in range(p):
        qTest_ = sampling.testSample(sampleType='unifSpaced',
                                     qBound=qBound[i],
                                     nSamp=nTest[i])
        qTestList.append(qTest_.q)
    # Construct the Lagrange interpolation and evaluate it at the test samples
    fTest = lagInt(fNodes=fNodes,
                   qNodes=qNodes,
                   qTest=qTestList,
                   liDict={
                       'testRule': 'tensorProd'
                   }).val
    # Evaluate the exact model response over domRange
    qTestFull = []
    for i in range(p):
        qTestFull_ = np.linspace(domRange[i][0], domRange[i][1], nTest[i])
        qTestFull.append(qTestFull_)
    fTestFull = analyticTestFuncs.fEx2D(qTestFull[0], qTestFull[1], 'type1',
                                        'tensorProd').val
    fTestFullGrid = fTestFull.reshape((nTest[0], nTest[1]), order='F').T
    fTestGrid = fTest.reshape((nTest[0], nTest[1]), order='F').T
    # Plots
    plt.figure(figsize=(16, 8))
    plt.subplot(1, 2, 1)
    ax = plt.gca()
    CS1 = plt.contour(qTestFull[0], qTestFull[1], fTestFullGrid, 35)
    plt.clabel(CS1,
               inline=True,
               fontsize=15,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    qNodesGrid = reshaper.vecs2grid(qNodes)
    plt.plot(qNodesGrid[:, 0], qNodesGrid[:, 1], 'o', color='r', markersize=6)
    plt.xlabel(r'$q_1$', fontsize=25)
    plt.ylabel(r'$q_2$', fontsize=25)
    plt.xticks(fontsize=17)
    plt.yticks(fontsize=17)
    plt.title('Exact Response Surface')
    plt.subplot(1, 2, 2)
    ax = plt.gca()
    CS2 = plt.contour(qTestList[0], qTestList[1], fTestGrid, 20)
    plt.clabel(CS2,
               inline=True,
               fontsize=15,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.plot(qNodesGrid[:, 0], qNodesGrid[:, 1], 'o', color='r', markersize=6)
    plt.xlabel(r'$q_1$', fontsize=25)
    plt.ylabel(r'$q_2$', fontsize=25)
    plt.xticks(fontsize=17)
    plt.yticks(fontsize=17)
    plt.title('Response Surface by Lagrange Interpolation')
    plt.xlim(domRange[0])
    plt.ylim(domRange[1])
    plt.show()
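`lagInt` assembles the interpolant from single-variate Lagrange bases, one per dimension. A self-contained sketch of such a basis (the helper `lagrange_basis_1d` below is hypothetical, not part of UQit), which reconstructs a quadratic exactly from 3 nodes:

import numpy as np

def lagrange_basis_1d(qNodes, qTest):
    """Evaluate the n Lagrange bases at qTest; returns shape (len(qTest), n)."""
    n = len(qNodes)
    L = np.ones((len(qTest), n))
    for k in range(n):
        for l in range(n):
            if l != k:
                L[:, k] *= (qTest - qNodes[l]) / (qNodes[k] - qNodes[l])
    return L

nodes = np.array([-1.0, 0.0, 1.0])
qTest = np.linspace(-1, 1, 5)
fTest = lagrange_basis_1d(nodes, qTest) @ nodes**2   # interpolate f(q)=q^2
print(np.allclose(fTest, qTest**2))  # True: degree-2 polynomial, 3 nodes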
Example #6
def lagIntAtGQs(fValM1, qM1, spaceM1, nM2, spaceM2, distType):
    """    
    Given response values `fValM1` at `nM1` arbitrary samples over the p-D `spaceM1`, the values at 
    `nM2` Gauss quadrature points over `spaceM2` are computed using Lagrange interpolation. 

       * Both `spaceM1` and `spaceM2` have the same dimension `p`.
       * In each of the p dimensions, ||`spaceM2`|| < ||`spaceM1`||.
       * The Gauss quadrature nodes over 'spaceM2' can be the abscissa of different types of 
         polynomials based on the `distType` and the gPCE rule. 
       * A tensor-product grid from the GQ nodes on `spaceM2` is created

    args:    
      `qM1`: List of length p 
         List of samples on `spaceM1`; qM1=[qM1_1,qM1_2,...,qM1_p] where `qM1_i` is a 1D numpy 
         array of size nM1_i, for i=1,2,...,p. 
      `fValM1`: numpy p-D array of shape (nM1_1,nM1_2,...,nM1_p).
         Response values at `qM1`
      `spaceM1`: List of length p.
         =[spaceM1_1,spaceM1_2,...,spaceM1_p] where spaceM1_i is a list of two elements, 
         specifying the admissible range of qM1_i, for i=1,2,...,p.
      `nM2`: List of length p,
         Containing the number of Gauss quadrature samples `qM2` in each parameter dimension, 
         nM2=[nM2_1,nM2_2,...,nM2_p]
      `spaceM2`: List of length p.
         =[spaceM2_1,spaceM2_2,...,spaceM2_p] where spaceM2_i is a list of two elements, 
         specifying the admissible range of qM2_i, for i=1,2,...,p.
      `distType`: List of length p with string members
         The i-th element specifies the distribution type of the i-th parameter according to 
         the gPCE rule.

    Returns:
      `qM2`: List of length p 
         List of samples on `spaceM2`; qM2=[qM2_1,qM2_2,...,qM2_p] where `qM2_i` is a 1D numpy 
         array of size nM2_i, for i=1,2,...,p. 
      `xiM2`: numpy array of shape (nM2_1*nM2_2*...*nM2_p,p)   
         Tensor-product grid of Gauss-quadrature nodes on the mapped space of `spaceM2`
      `fValM2`: 1D numpy array of size (nM2_1*nM2_2*...*nM2_p).
         Interpolated response values at `xiM2`
    """
    # (1) Check the inputs
    ndim = len(spaceM1)
    if (ndim != len(spaceM2) or ndim != len(qM1)):
        raise ValueError(
            'SpaceM1 and SpaceM2 should have the same dimension, p.')
    for idim in range(ndim):
        d1 = spaceM1[idim][1]-spaceM1[idim][0]
        d2 = spaceM2[idim][1]-spaceM2[idim][0]
        if (d2 > d1):
            print("Wrong parameter range in direction ", ldim+1)
            raise ValueError("||spaceM2|| should be smaller than ||spaceM1||.")
    # (2) Construct the Gauss-quadrature stochastic samples for model2
    qM2 = []
    xiM2 = []
    for i in range(ndim):
        xi_, w = pce.gqPtsWts(nM2[i], distType[i])
        qM2_ = pce.mapFromUnit(xi_, spaceM2[i])
        qM2.append(qM2_)
        xiM2.append(xi_)
    if (ndim == 1):
        qM2 = qM2[0]
        xiM2 = xiM2[0]
    elif (ndim > 1):
        xiM2 = reshaper.vecs2grid(xiM2)
    # (3) Use lagrange interpolation to find values at q2, given fVal1 at q1
    if ndim == 1:
        fVal2Interp = lagInt(fNodes=fValM1, qNodes=[qM1[0]], qTest=[qM2]).val
    elif (ndim > 1):
        fVal2Interp_ = lagInt(fNodes=fValM1, qNodes=qM1, qTest=qM2, liDict={
                              'testRule': 'tensorProd'}).val
        nM2_ = fVal2Interp_.size
        fVal2Interp = fVal2Interp_.reshape(nM2_, order='F')
    return qM2, xiM2, fVal2Interp
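For `distType='Unif'`, `pce.gqPtsWts` is assumed to return Gauss-Legendre nodes and weights on [-1, 1], which `pce.mapFromUnit` then maps into `spaceM2[i]`. A quick NumPy check of that quadrature rule:

import numpy as np

xi, w = np.polynomial.legendre.leggauss(4)   # nodes/weights on [-1, 1]
a, b = -2.0, 1.5
q = a + (xi + 1.0) * 0.5 * (b - a)           # map nodes into [a, b]
est = 0.5 * (b - a) * np.sum(w * q**2)       # quadrature for integral of q^2
print(np.isclose(est, (b**3 - a**3) / 3.0))  # True (exact up to degree 7)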
Example #7
    def psobol_cnstrct(self):
        """
        Constructing probabilistic Sobol indices over a p-D parameter space, p>1
        """
        p = self.p
        print('... Probabilistic Sobol indices for %d-D input parameter.' % p)
        psobolDict = self.psobolDict
        qTrain = self.qTrain
        yTrain = self.yTrain
        noiseSdev = self.noiseV
        # (0) Assignments
        qTest = psobolDict['qTest']
        pdf = psobolDict['pdf']
        nMC = psobolDict['nMC']
        nw_ = int(nMC / 10)
        # Make a dict for gpr (do NOT change)
        gprOpts = {
            'nIter': psobolDict['nIter_gpr'],
            'lr': psobolDict['lr_gpr'],
            'convPlot': psobolDict['convPlot_gpr']
        }
        standardizeYTrain_ = False
        if 'standardizeYTrain_gpr' in psobolDict.keys():
            gprOpts.update(
                {'standardizeYTrain': psobolDict['standardizeYTrain_gpr']})
            standardizeYTrain_ = True

        # (1) Generate a tensor product grid from qTest. At the grid samples, the gpr is sampled.
        qTestGrid = reshaper.vecs2grid(qTest)

        # (2) Construct GPR surrogate based on training data
        gpr_ = gpr_torch.gpr(qTrain, yTrain[:, None], noiseSdev, qTestGrid,
                             gprOpts)
        post_f = gpr_.post_f
        post_obs = gpr_.post_y
        shift_ = 0.0
        scale_ = 1.0
        if standardizeYTrain_:
            shift_ = gpr_.shift[0]  # 0: single-response
            scale_ = gpr_.scale[0]

        # optional: plot constructed response surface only for p==2
        # gpr_torch.gprPlot().torch2d_3dSurf(qTrain,yTrain,qTest,post_obs,shift=shift_,scale=scale_)

        # (3) Compute Sobol indices for samples of GPR generated at qTestGrid
        Si_list_ = []
        Sij_list_ = []
        STi_list_ = []
        for j in range(nMC):
            # Draw a sample for f(q) from GPR surrogate
            f_ = post_obs.sample().numpy() * scale_ + shift_
            f_ = np.reshape(f_, self.nTest, 'F')
            # Compute the Sobol indices
            sobol_ = sobol(qTest, f_, pdf)
            Si_list_.append(sobol_.Si)
            Sij_list_.append(sobol_.Sij)
            STi_list_.append(sobol_.STi)
            if ((j + 1) % nw_ == 0):
                print(
                    "...... psobol repetition for finding samples of the Sobol indices, iter = %d/%d"
                    % (j + 1, nMC))
        # reshape lists and arrays
        S_ = np.zeros(nMC)
        ST_ = np.zeros(nMC)
        Si_list = []
        Sij_list = []
        STi_list = []
        for i in range(p):
            for j in range(nMC):
                S_[j] = Si_list_[j][i]
                ST_[j] = STi_list_[j][i]
            Si_list.append(S_.copy())
            STi_list.append(ST_.copy())

        for i in range(len(Sij_list_[0])):
            for j in range(nMC):
                S_[j] = Sij_list_[j][i]
            Sij_list.append(S_.copy())

        self.Si_samps = Si_list
        self.Sij_samps = Sij_list
        self.STi_samps = STi_list
        self.SijName = sobol_.SijName

        # (4) Outputs
        # Optional outputs: can only be used for gprPlot
        optOut = {'post_f': post_f, 'post_obs': post_obs}
        self.optOut = optOut
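`Si_samps`, `Sij_samps`, and `STi_samps` each hold `nMC` Monte Carlo samples of an index, one per GPR draw. A hypothetical post-processing sketch (placeholder samples, not actual UQit output) summarizing one index by its mean and a 95% interval:

import numpy as np

Si_samps_q1 = np.random.default_rng(0).normal(0.6, 0.05, 500)  # placeholder samples
lo, hi = np.percentile(Si_samps_q1, [2.5, 97.5])
print('S1 = %.3f  (95%% interval: [%.3f, %.3f])' % (Si_samps_q1.mean(), lo, hi))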
Example #8
    def interp_pd(self):
        R"""
      Lagrange interpolation of order :math:`(n_1-1)*(n_2-1)*...*(n_p-1)` constructed over a
      p-D parameter space. Here, :math:`n_k, k=1,2,...,p` refers to the number of training
      nodes in the k-th dimension of the parameter space.

      .. math::
          F(\mathbf{Q})=\sum_{k_1=1}^{n_1} ... \sum_{k_p=1}^{n_p} [fNodes(k_1,k_2,...,k_p) L_{k_1}(Q_1) L_{k_2}(Q_2) ... L_{k_p}(Q_p)]

      where :math:`L_{k_i}(Q_i)` is the single-variate Lagrange basis in the i-th dimension.
      """
        Q = np.asarray(self.qTest)
        qNodes = self.qNodes
        fNodes = self.fNodes
        p = self.p
        nList = []  #list of the number of nodes in each dimension
        mList = []  #list of the number of test points
        for i in range(p):
            n_ = qNodes[i].shape[0]
            nList.append(n_)
            mList.append(Q[i].shape[0])
        if fNodes.ndim == 1:
            # NOTE: the smaller index changes slowest (fortran like)
            fNodes = np.reshape(fNodes, nList, order='F')
        #check the arguments
        testRule = self.liDict['testRule']
        if (p != len(Q)):
            raise ValueError(
                'qNodes and qTest should be of the same dimension.')
        for i in range(p):
            if (np.all(Q[i] > np.amax(qNodes[i], axis=0))):
                raise ValueError(
                    'qTest cannot be larger than max(qNodes) in %d-th dim.' %
                    i)
            if (np.all(Q[i] < np.amin(qNodes[i], axis=0))):
                raise ValueError(
                    'qTest cannot be smaller than min(qNodes) in %d-th dim.' %
                    i)
            if (fNodes.shape[i] != nList[i]):
                raise ValueError(
                    'qNodes and fNodes should be of the same size.')
        #Construct and evaluate Lagrange interpolation
        idxTest = []  #List of indices counting the test points
        Lk = []  #List of Lagrange bases
        for i in range(p):
            idxTest.append(np.arange(mList[i]))
            k = np.arange(nList[i])
            Lk_ = self.basis1d(qNodes[i], k, Q[i])  #Basis at the i-th dim
            Lk.append(Lk_)

        if testRule == 'tensorProd':
            idxTestGrid = reshaper.vecs2grid(idxTest)
        elif testRule == 'set':
            idxTestGrid = idxTest[0]  #same for all dimensions

        mTot = idxTestGrid.shape[0]  #total number of test points
        fInterp = np.zeros(mTot)
        if p > 2:
            mulInd = [[i for i in range(p - 1, -1, -1)]] * (
                p - 1)  #list of indices for tensordot
        else:
            mulInd = p

        for j in range(mTot):  #test points
            idxTest_ = idxTestGrid[j]
            if testRule == 'tensorProd':
                Lk_prod = Lk[0][:, int(idxTest_[0])]
                for i in range(1, p):
                    Lk_prod = np.tensordot(Lk_prod, Lk[i][:,
                                                          int(idxTest_[i])], 0)
            elif testRule == 'set':
                Lk_prod = Lk[0][:, int(idxTest_)]
                for i in range(1, p):
                    Lk_prod = np.tensordot(Lk_prod, Lk[i][:, int(idxTest_)], 0)
            fInterp[j] = np.tensordot(Lk_prod, fNodes, mulInd)
        if testRule == 'tensorProd':
            fInterp = fInterp.reshape(mList, order='F')
        self.val = fInterp
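The `tensordot` contractions above implement the nested sum in the docstring. For p=2, the interpolant at a single test point is the full contraction of the outer product of the two basis vectors with `fNodes`; a small check:

import numpy as np

rng = np.random.default_rng(1)
fNodes = rng.random((4, 3))
L1, L2 = rng.random(4), rng.random(3)        # L_{k1}(Q1), L_{k2}(Q2) at one test point
Lk_prod = np.tensordot(L1, L2, 0)            # outer product, shape (4, 3)
fInterp = np.tensordot(Lk_prod, fNodes, 2)   # contract both axes (mulInd=p=2 above)
print(np.isclose(fInterp, L1 @ fNodes @ L2)) # True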
Example #9
 def cnstrct_GQTP_pd(self):
     R"""
     Constructs a PCE over a p-D parameter space (p>1) using the following settings:
        * `'sampType':'GQ'` (Gauss-Quadrature nodes)
        * `'truncMethod': 'TP'` (Tensor-product)
        * `'pceSolveMethod':'Projection'` or 'Regression'
     """
     if self.verbose:
        print('... A gPCE for a %d-D parameter space is constructed.'%self.p)
        print('...... Samples in each direction are Gauss Quadrature nodes (User should check this!).')
        print('...... PCE truncation method: TP')
        print('...... Method of computing PCE coefficients: %s' %self.pceSolveMethod)
     distType=self.distType
     p=self.p
     #(1) Quadrature rule
     xi=[]
     w=[]
     fac=[]
     K=1
     for i in range(p):
         xi_,w_=self.gqPtsWts(self.nQList[i],distType[i])
         xi.append(xi_)
         w.append(w_)
         K*=self.nQList[i]
         fac.append(self._gqInteg_fac(distType[i]))
     if self.verbose:    
        print('...... Number of terms in PCE, K= ',K)
     nData=len(self.fVal)   #number of observations
     if self.verbose:
        print('...... Number of data points, n= ',nData)
     if K!=nData:
        raise ValueError("K=%d is not equal to nData=%d"%(K,nData)) 
     #(2) Index set
     kSet=[]    #index set for the constructed PCE
     kGlob=np.arange(K)   #Global index
     kLoc=kGlob.reshape(self.nQList,order='F')  #Local index
     for i in range(K):
         k_=np.where(kLoc==kGlob[i])
         kSet_=[]
         for j in range(p):
             kSet_.append(k_[j][0])
         kSet.append(kSet_)
     #(3) Find the coefficients in the expansion
     #By default, Projection method is used (assuming samples are Gauss-Quadrature points)
     fCoef=np.zeros(K)
     sum2=[]
     fVal_=self.fVal.reshape(self.nQList,order='F').T #global to local index
     for k in range(K):
         psi_k=[]
         for j in range(p):
             psi_k.append(self.basis(kSet[k][j],xi[j],distType[j]))
         sum1 =np.matmul(fVal_,(psi_k[0]*w[0]))*fac[0]
         sum2_=np.sum(psi_k[0]**2*w[0])*fac[0]
         for i in range(1,p):
             num_=(psi_k[i]*w[i])
             sum1=np.matmul(sum1,num_)*fac[i]
             sum2_*=np.sum(psi_k[i]**2*w[i])*fac[i]
         fCoef[k]=sum1/sum2_
         sum2.append(sum2_)
     #(3b) Compute fCoef via Regression
     if self.pceDict['pceSolveMethod']=='Regression':
        xiGrid=reshaper.vecs2grid(xi)
        A=np.zeros((nData,K))
        for k in range(K):
            aij_=self.basis(kSet[k][0],xiGrid[:,0],distType[0])
            for i in range(1,p):
                aij_*=self.basis(kSet[k][i],xiGrid[:,i],distType[i])
            A[:,k]=aij_
        fCoef=linAlg.myLinearRegress(A,self.fVal)   #This is a uniquely determined system
     #(4) Find the mean and variance of f(q) as estimated by PCE
     fMean=fCoef[0]
     fVar=np.sum(fCoef[1:]**2.*sum2[1:])
     self.coefs=fCoef
     self.fMean=fMean
     self.fVar=fVar
     self.kSet=kSet
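A 1D analogue of the projection step above for a 'Unif' parameter: with Gauss-Legendre samples, `fCoef[k]` is the quadrature estimate of the projection of f onto the k-th basis divided by the basis norm, the mean is `fCoef[0]`, and the variance sums the squared higher coefficients weighted by the basis norms (the 1/2 integration factor cancels in the coefficient ratio). A self-contained sketch:

import numpy as np
from numpy.polynomial import legendre

n = 6
xi, w = legendre.leggauss(n)                 # Gauss-Legendre nodes/weights
f = np.exp(xi)                               # model response at the GQ nodes
fCoef, norm2 = np.zeros(n), np.zeros(n)
for k in range(n):
    Pk = legendre.legval(xi, [0]*k + [1])    # Legendre polynomial P_k
    fCoef[k] = np.sum(f * Pk * w) / np.sum(Pk**2 * w)
    norm2[k] = 0.5 * np.sum(Pk**2 * w)       # E[P_k^2] for xi ~ U(-1, 1)
fMean = fCoef[0]
fVar = np.sum(fCoef[1:]**2 * norm2[1:])
print(fMean, np.sinh(1.0))                   # both approx. 1.1752 = E[exp(xi)]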
Example #10
def lagIntAtGQs_2d_test():
    """
       Test lagIntAtGQs(...) for a 2D uncertain parameter space
    """
    #------ SETTINGS ----------------------------------------------------
    #Space 1
    nSamp1 = [6, 10]  #number of samples in PCE1, parameter 1,2
    space1 = [
        [-2, 1.5],  #admissible space of PCE1 (both parameters)
        [-3, 2.5]
    ]
    sampleType1 = ['GLL', 'unifRand']  #see trainSample class in sampling.py
    #Space 2
    nSamp2 = [4, 5]  #number of samples in PCE2, parameter 1,2
    space2 = [
        [-0.5, 1],  #admissible space of PCE2 (both parameters)
        [-2., 1.5]
    ]
    #Test samples
    nTest = [100, 101]  #number of test samples of parameter 1,2
    #model function
    fType = 'type1'  #Type of simulator
    #---------------------------------------------------------------------
    p = 2
    distType2 = ['Unif', 'Unif']
    #(1) Generate samples from space 1
    q1 = []
    for i in range(p):
        q1_ = sampling.trainSample(sampleType=sampleType1[i],
                                   qInfo=space1[i],
                                   nSamp=nSamp1[i])
        space1[i] = [
            min(q1_.q), max(q1_.q)
        ]  #correction for uniform samples (otherwise contours are not plotted properly)
        q1.append(q1_.q)
    #Response values at the GL points
    fVal1 = analyticTestFuncs.fEx2D(q1[0], q1[1], fType, 'tensorProd').val
    #(2) Lagrange interpolation from samples 1 to GQ nodes on space 2
    q2, xi2, fVal2 = lagIntAtGQs(fVal1, q1, space1, nSamp2, space2, distType2)
    #(3) Construct a PCE on space 2
    pceDict = {
        'p': p,
        'sampleType': 'GQ',
        'pceSolveMethod': 'Projection',
        'truncMethod': 'TP',
        'distType': distType2
    }
    pce2 = pce(fVal=fVal2, xi=xi2, pceDict=pceDict, nQList=nSamp2)
    #(4) Evaluate the surrogates: Lagrange interpolation over space 1
    #                             PCE over space 2
    #test samples
    qTest1 = []
    xiTest2 = []
    qTest2 = []
    for i in range(p):
        testSamps1 = sampling.testSample('unifSpaced',
                                         qBound=space1[i],
                                         nSamp=nTest[i])
        qTest1.append(testSamps1.q)
        testSamps2 = sampling.testSample('unifSpaced',
                                         GQdistType=distType2[i],
                                         qBound=space2[i],
                                         nSamp=nTest[i])
        xiTest2.append(testSamps2.xi)
        qTest2.append(testSamps2.q)
    #evaluation
    #space 1
    fTest1_ex = analyticTestFuncs.fEx2D(qTest1[0], qTest1[1], fType,
                                        'tensorProd').val
    fTest1 = lagInt(fNodes=fVal1,
                    qNodes=q1,
                    qTest=qTest1,
                    liDict={
                        'testRule': 'tensorProd'
                    }).val
    #space 2
    pceEval2 = pceEval(coefs=pce2.coefs,
                       xi=xiTest2,
                       distType=distType2,
                       kSet=pce2.kSet)
    fTest2 = pceEval2.pceVal
    #(5) 2d contour plots
    plt.figure(figsize=(20, 8))
    plt.subplot(1, 3, 1)
    ax = plt.gca()
    fTest_Grid = fTest1_ex.reshape(nTest, order='F').T
    CS1 = plt.contour(qTest1[0], qTest1[1], fTest_Grid,
                      35)  #,cmap=plt.get_cmap('viridis'))
    plt.clabel(CS1,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.xlabel('q1')
    plt.ylabel('q2')
    plt.title('Exact response surface over space 1')
    #
    plt.subplot(1, 3, 2)
    ax = plt.gca()
    fTest1_Grid = fTest1.reshape(nTest, order='F').T
    CS2 = plt.contour(qTest1[0], qTest1[1], fTest1_Grid,
                      35)  #,cmap=plt.get_cmap('viridis'))
    plt.clabel(CS2,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    q1Grid = reshaper.vecs2grid(q1)
    plt.plot(q1Grid[:, 0], q1Grid[:, 1], 'ob', markersize=6)
    q2_ = reshaper.vecs2grid(q2)
    plt.plot(q2_[:, 0], q2_[:, 1], 'sr', markersize=6)
    plt.xlabel('q1')
    plt.ylabel('q2')
    plt.title(
        'Response surface by Lagrange Int.\n over space-1 using blue circles')
    #
    plt.subplot(1, 3, 3)
    ax = plt.gca()
    fTest2_Grid = fTest2.reshape(nTest, order='F').T
    CS3 = plt.contour(qTest2[0], qTest2[1], fTest2_Grid,
                      20)  #,cmap=plt.get_cmap('viridis'))
    plt.clabel(CS3,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.plot(q2_[:, 0], q2_[:, 1], 'sr', markersize=6)
    plt.xlabel('q1')
    plt.ylabel('q2')
    plt.title('Response surface by PCE over space-2 \n using red squares')
    plt.xlim(space1[0][:])
    plt.ylim(space1[1][:])
    plt.show()
Example #11
    def ppce_cnstrct_pd(self):
        """
        Constructing a probabilistic PCE over a p-D parameter space, p>1
        """
        p = self.p
        print('... Probabilistic PCE for %d-D input parameter.' % p)
        ppceDict = self.ppceDict
        qTrain = self.qTrain
        yTrain = self.yTrain
        noiseSdev = self.noiseV
        # (0) Assignments
        nGQ = ppceDict['nGQtest']
        qInfo = ppceDict['qInfo']
        nMC = ppceDict['nMC']
        nw_ = int(nMC / 10)
        distType = ppceDict['distType']
        # Make a dict for gpr (do NOT change)
        gprOpts = {
            'nIter': ppceDict['nIter_gpr'],
            'lr': ppceDict['lr_gpr'],
            'convPlot': ppceDict['convPlot_gpr']
        }
        standardizeYTrain_ = False
        if 'standardizeYTrain_gpr' in ppceDict.keys():
            gprOpts.update(
                {'standardizeYTrain': ppceDict['standardizeYTrain_gpr']})
            standardizeYTrain_ = True

        # Make a dict for PCE (do NOT change)
        # Always use TP truncation with GQ sampling (hence Projection method)
        pceDict = {
            'p': p,
            'truncMethod': 'TP',
            'sampleType': 'GQ',
            'pceSolveMethod': 'Projection',
            'distType': distType
        }

        # (1) Generate test points that are Gauss quadratures chosen based on
        # the distribution of q (gPCE rule)
        qTestList = []
        for i in range(p):
            sampsGQ = sampling.trainSample(sampleType='GQ',
                                           GQdistType=distType[i],
                                           qInfo=qInfo[i],
                                           nSamp=nGQ[i])
            qTestList.append(sampsGQ.q)
        qTestGrid = reshaper.vecs2grid(qTestList)

        # (2) Construct GPR surrogate based on training data
        gpr_ = gpr_torch.gpr(qTrain, yTrain[:, None], noiseSdev, qTestGrid,
                             gprOpts)
        post_f = gpr_.post_f
        post_obs = gpr_.post_y
        shift_ = 0.0
        scale_ = 1.0
        if standardizeYTrain_:
            shift_ = gpr_.shift[0]  # 0: single-response
            scale_ = gpr_.scale[0]

        # optional: plot constructed response surface
        # gpr_torch.gprPlot().torch2d_3dSurf(qTrain,yTrain,qTestList,post_obs,shift=shift_,scale=scale_)

        # (3) Use samples of GPR tested at GQ nodes to construct a PCE
        fMean_list = []
        fVar_list = []
        for j in range(nMC):
            # Draw a sample for f(q) from GPR surrogate
            f_ = post_obs.sample().numpy() * scale_ + shift_
            # Construct PCE for the drawn sample
            pce_ = pce(fVal=f_,
                       nQList=nGQ,
                       xi=[],
                       pceDict=pceDict,
                       verbose=False)
            fMean_list.append(pce_.fMean)
            fVar_list.append(pce_.fVar)
            if ((j + 1) % nw_ == 0):
                print(
                    "...... ppce repetition for finding samples of the PCE coefficients, iter = %d/%d"
                    % (j + 1, nMC))

        # (4) Outputs
        fMean_list = np.asarray(fMean_list)
        fVar_list = np.asarray(fVar_list)
        # Optional outputs: only used for gprPlot
        optOut = {'post_f': post_f, 'post_obs': post_obs, 'qTest': qTestList}
        self.optOut = optOut
        self.fMean_samps = fMean_list
        self.fVar_samps = fVar_list
Example #12
def gprTorch_2d_singleTask_test():
    """
    Test for GPR for 2d input
    """
    ##
    def plot_trainData(n,fSamples,noiseSdev,yTrain):
        """
        Plot the noisy training data which are used in GPR. 
        """
        plt.figure(figsize=(10,5))
        x_=np.zeros(n)
        for i in range(n):
            x_[i]=i+1
        for i in range(500):  #only for plotting possible realizations
            noise_=noiseSdev*np.random.randn(n)
            plt.plot(x_,fSamples+noise_,'.',color='steelblue',alpha=0.4,markersize=1)
        plt.errorbar(x_,fSamples,yerr=1.96*abs(noiseSdev),ls='none',capsize=5,ecolor='k',
                elinewidth=4,label=r'$95\%$ CI in Obs.')
        plt.plot(x_,fSamples,'o' ,markersize=6,markerfacecolor='lime',
                markeredgecolor='salmon',label='Mean Observation')
        plt.plot(x_,yTrain ,'xr' ,markersize=6,label='Sample Observation')
        plt.legend(loc='best',fontsize=15)
        plt.ylabel('QoI',fontsize=17)
        plt.xlabel('Simulation Index',fontsize=17)
        plt.xticks(fontsize=15)
        plt.yticks(fontsize=15)    
        plt.title('Training data with associated confidence')
        plt.show()
    ##
    def trainDataGen(p,sampleType,n,qBound,fExName,noiseType):
        """
        Generate Training Data
        """
        #  (a) xTrain
        if sampleType=='grid':
            nSamp=n[0]*n[1]
            gridList=[]
            for i in range(p):
                #grid_=torch.linspace(qBound[i][0],qBound[i][1],n[i])   #torch
                grid_=np.linspace(qBound[i][0],qBound[i][1],n[i])
                gridList.append(grid_)
            xTrain=reshaper.vecs2grid(gridList)
            #xTrain = gpytorch.utils.grid.create_data_from_grid(gridList)  #torch
        elif sampleType=='random':
            nSamp=n     #number of random samples
            xTrain=sampling.LHS_sampling(n,qBound)
        #  (b) Observation noise   
        #noiseSdev=torch.ones(nTot).mul(0.1)    #torch
        noiseSdev=noiseGen(nSamp,noiseType,xTrain,fExName)
        #yTrain = torch.sin(mt.pi*xTrain[:,0])*torch.cos(.25*mt.pi*xTrain[:,1])+
        #         torch.randn_like(xTrain[:,0]).mul(0.1)   #torch
        #  (c) Training response
        yTrain=analyticTestFuncs.fEx2D(xTrain[:,0],xTrain[:,1],fExName,'comp').val
        yTrain_noiseFree=yTrain
        yTrain=yTrain_noiseFree+noiseSdev*np.random.randn(nSamp)
        return xTrain,yTrain,noiseSdev,yTrain_noiseFree
    ##    
    def noiseGen(n,noiseType,xTrain,fExName):
       """
       Generate a 1D numpy array of standard deviations of the observation noise
       """
       if noiseType=='homo':
          sd=0.2   # noise standard deviation  (Note: non-zero, to avoid instabilities)
          sdV=sd*np.ones(n)
       elif noiseType=='hetero':
          #sdMin=0.01
          #sdMax=0.5
          #sdV=sdMin+(sdMax-sdMin)*np.linspace(0.0,1.0,n)
          #sdV=0.15*np.ones(n)
          sdV=0.1*(analyticTestFuncs.fEx2D(xTrain[:,0],xTrain[:,1],fExName,'comp').val+0.001)
       return sdV
    #
    #----- SETTINGS
    qBound=[[-2,2],[-2,2]]   #Admissible range of parameters
    fExName='type1'          #Type of simulator in analyticTestFuncs.fEx2D
                             #'type1', 'type2', 'type3', 'Rosenbrock'
    sampleType='random'      #'random' or 'grid': type of training samples
    if sampleType=='grid':
       n=[9,9]               #number of training samples in each input dimension
    elif sampleType=='random':
       n=100                 #total number of training samples drawn randomly
    noiseType='hetero'       #noise type: 'homo'=homoscedastic, 'hetero'=heteroscedastic
    #options for GPR
    nIter_=1000        #number of iterations in optimization of GPR hyperparameters
    lr_   =0.05        #learning rate in the optimization of the hyperparameters
    convPlot_=True     #plot convergence of optimization of GPR hyperparameters
    nTest=[21,20]     #number of test points in each parameter dimension
    #------------------------------------------------
    #(0) Assemble the gprOpts dict
    gprOpts={'nIter':nIter_,'lr':lr_,'convPlot':convPlot_}
    #(1) Generate training data
    p=len(qBound)    #dimension of the input
    xTrain,yTrain,noiseSdev,yTrain_noiseFree=trainDataGen(p,sampleType,n,qBound,fExName,noiseType)
    nSamp=yTrain.shape[0]
    plot_trainData(nSamp,yTrain_noiseFree,noiseSdev,yTrain)
    #(2) Create the test samples
    xTestList=[]
    for i in range(p):
        #grid_=torch.linspace(qBound[i][0],qBound[i][1],20)    #torch
        grid_=np.linspace(qBound[i][0],qBound[i][1],nTest[i])
        xTestList.append(grid_)
    xTest=reshaper.vecs2grid(xTestList)
    #(3) Construct the GPR based on the training data and make predictions at the test samples
    gpr_=gpr(xTrain,yTrain[:,None],noiseSdev,xTest,gprOpts)
    post_f=gpr_.post_f
    post_obs=gpr_.post_y
    # Predicted mean and variance of the posteriors at the test grid    
    fP_=gprPost(post_f,nTest)
    fP_.torchPost()
    post_f_mean=fP_.mean
    post_f_sdev=fP_.sdev
    lower_f=fP_.ciL
    upper_f=fP_.ciU
    obsP_=gprPost(post_obs,nTest)
    obsP_.torchPost()
    post_obs_mean=obsP_.mean
    post_obs_sdev=obsP_.sdev
    lower_obs=obsP_.ciL
    upper_obs=obsP_.ciU
    # Plots
    with torch.no_grad():
        fig = plt.figure(figsize=(16,4))
        ax = fig.add_subplot(141)        
        fEx_test=analyticTestFuncs.fEx2D(xTest[:,0],xTest[:,1],fExName,'comp').val
        CS0=ax.contour(xTestList[0],xTestList[1],fEx_test.reshape((nTest[0],nTest[1]),order='F').T,levels=40)
        ax.clabel(CS0, inline=True, fontsize=15,colors='k',fmt='%0.2f',rightside_up=True,manual=False)
        ax.plot(xTrain[:,0],xTrain[:,1],'or')
        ax.set_title(r'Exact $f(q)$')
        ax = fig.add_subplot(142)
        CS1=ax.contour(xTestList[0],xTestList[1],(post_f_mean).T,levels=40)
        ax.clabel(CS1, inline=True, fontsize=15,colors='k',fmt='%0.2f',rightside_up=True,manual=False)
        ax.plot(xTrain[:,0],xTrain[:,1],'or')
        ax.set_title(r'Mean Posterior of $f(q)$')
        ax = fig.add_subplot(143)
        CS2=ax.contour(xTestList[0],xTestList[1],upper_obs.T,levels=40)
        ax.clabel(CS2, inline=True, fontsize=15,colors='k',fmt='%0.2f',rightside_up=True,manual=False)
        ax.plot(xTrain[:,0],xTrain[:,1],'or')
        ax.set_title(r'Upper Confidence for Observations')
        ax = fig.add_subplot(144)
        CS2=ax.contour(xTestList[0],xTestList[1],lower_obs.T,levels=40)
        ax.clabel(CS2, inline=True, fontsize=15,colors='k',fmt='%0.2f',rightside_up=True,manual=False)
        ax.plot(xTrain[:,0],xTrain[:,1],'or')
        ax.set_title(r'Lower Confidence for Observations')
        plt.show()
        #2dplot
        pltOpts={'title':'Mean posterior of f(q)',
                 'xlab':r'$q_1$',
                 'ylab':r'$q_2$'}
        gprPlot(pltOpts).torch2d_2dcont(xTrain,xTestList,post_f_mean) 
        #3d plot
        gprPlot().torch2d_3dSurf(xTrain,yTrain,xTestList,post_obs)
Example #13
def sobol_2par_unif_test():
    """
      Test for sobol when we have 2 uncertain parameters q1, q2.
      Sobol indices are computed for f(q1,q2)=q1**2.+q1*q2, which is analyticTestFuncs.fEx2D('type3').
      The indices are computed with the following methods:
       * Method1: Direct computation by UQit
       * Method2: First a PCE is constructed and then its values are used to compute Sobol indices
       * Method3: Analytical expressions (reference values)
    """
    #--------------------------
    #------- SETTINGS
    n=[101, 100]       #number of samples for q1 and q2, Method1
    qBound=[[-3,1],   #admissible range of parameters
            [-1,2]]
    nQpce=[5,6]      #number of GQ points for Method2
    #--------------------------
    fType='type3'    #type of analytical function
    p=len(n)
    distType=['Unif']*p
    #(1) Samples from parameters space
    q=[]
    pdf=[]
    for i in range(p):
        q.append(np.linspace(qBound[i][0],qBound[i][1],n[i]))
        pdf.append(np.ones(n[i])/(qBound[i][1]-qBound[i][0]))
    #(2) Compute function value at the parameter samples
    fEx_=analyticTestFuncs.fEx2D(q[0],q[1],fType,'tensorProd')
    fEx=np.reshape(fEx_.val,n,'F')
    #(3) Compute Sobol indices direct numerical integration
    sobol_=sobol(q,fEx,pdf)
    Si=sobol_.Si
    STi=sobol_.STi
    Sij=sobol_.Sij

    #(4) Construct a PCE and then use the predictions of that in numerical integration
    #for computing Sobol indices.
    #Generate observations at Gauss-Legendre points
    xi=[]
    qpce=[]
    for i in range(p):
        samps=sampling.trainSample(sampleType='GQ',GQdistType=distType[i],qInfo=qBound[i],nSamp=nQpce[i])
        xi.append(samps.xi)
        qpce.append(samps.q)
    fVal_pceCnstrct=analyticTestFuncs.fEx2D(qpce[0],qpce[1],fType,'tensorProd').val
    #Construct the PCE
    xiGrid=reshaper.vecs2grid(xi)
    pceDict={'p':2,'sampleType':'GQ','truncMethod':'TP','pceSolveMethod':'Projection',
             'distType':distType}
    pce_=pce(fVal=fVal_pceCnstrct,nQList=nQpce,xi=xiGrid,pceDict=pceDict)

    #Use the PCE to predict at test samples from parameter space
    qpceTest=[]
    xiTest=[]
    for i in range(p):
        testSamps=sampling.testSample('unifSpaced',GQdistType=distType[i],qBound=qBound[i],nSamp=n[i])
        xiTest.append(testSamps.xi)
        qpceTest.append(testSamps.q)
    fPCETest_=pceEval(coefs=pce_.coefs,kSet=pce_.kSet,xi=xiTest,distType=distType)
    fPCETest=fPCETest_.pceVal
    #compute Sobol indices
    sobolPCE_=sobol(qpceTest,fPCETest,pdf)
    Si_pce=sobolPCE_.Si
    Sij_pce=sobolPCE_.Sij

    #(5) Exact Sobol indices (analytical expressions)
    if fType=='type3':
       fEx_.sobol(qBound)
       Si_ex=fEx_.Si
       STi_ex=fEx_.STi
       Sij_ex=fEx_.Sij

    #(6) results
    print(' > Main Indices by UQit:\n\t S1=%g, S2=%g, S12=%g' %(Si[0],Si[1],Sij[0]))
    print(' > Main indices by gPCE+Numerical Integration:\n\t S1=%g, S2=%g, S12=%g' %(Si_pce[0],Si_pce[1],Sij_pce[0]))
    print(' > Main Analytical Reference:\n\t S1=%g, S2=%g, S12=%g' %(Si_ex[0],Si_ex[1],Sij_ex[0]))
    print(' > Total Indices by UQit:\n\t ST1=%g, ST2=%g' %(STi[0],STi[1]))
    print(' > Total Analytical Reference:\n\t ST1=%g, ST2=%g' %(STi_ex[0],STi_ex[1]))
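Method1 above delegates to UQit's `sobol` class. A self-contained sketch of the same idea (an assumption about the approach, not UQit's actual implementation): build the ANOVA terms of f(q1,q2)=q1**2+q1*q2 on a tensor grid by quadrature and normalize the partial variances:

import numpy as np

trapz = getattr(np, 'trapezoid', None) or np.trapz  # NumPy 2.x renamed trapz
qBound = [[-3, 1], [-1, 2]]
q1 = np.linspace(qBound[0][0], qBound[0][1], 201)
q2 = np.linspace(qBound[1][0], qBound[1][1], 200)
w1 = 1.0 / (qBound[0][1] - qBound[0][0])     # uniform pdf of q1
w2 = 1.0 / (qBound[1][1] - qBound[1][0])     # uniform pdf of q2
Q1, Q2 = np.meshgrid(q1, q2, indexing='ij')
f = Q1**2 + Q1 * Q2                          # the 'type3' test function
f0 = trapz(trapz(f, q2, axis=1) * w2 * w1, q1)             # E[f]
f1 = trapz(f, q2, axis=1) * w2 - f0                        # main effect of q1
f2 = trapz(f, q1, axis=0) * w1 - f0                        # main effect of q2
D1 = trapz(f1**2 * w1, q1)                                 # partial variances
D2 = trapz(f2**2 * w2, q2)
D = trapz(trapz((f - f0)**2, q2, axis=1) * w2 * w1, q1)    # total variance
print('S1=%.4f  S2=%.4f  S12=%.4f' % (D1/D, D2/D, (D - D1 - D2)/D))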
Example #14
def gprTorch_2d_singleTask_test2():
    """
    Test GPR for 2D input: the importance of standardizing the training data.
    Run the test with 'standardizeYTrain_' set to True and then False.
    """

    ##
    def trainData():
        """
        Generate training data
        """
        qBound = [[-1, 1], [-1, 1]]
        x1_ = sampling.trainSample(sampleType='GQ',
                                   GQdistType='Unif',
                                   qInfo=qBound[0],
                                   nSamp=4)
        x2_ = sampling.trainSample(sampleType='GQ',
                                   GQdistType='Unif',
                                   qInfo=qBound[1],
                                   nSamp=4)
        xTrain = reshaper.vecs2grid([x1_.q, x2_.q])
        yTrain_mean = np.asarray([
            -0.0169906, -0.0191095, -0.0167435, -0.0172338, -0.0203195,
            -0.020089, -0.0184691, -0.0188843, -0.0164581, -0.0200013,
            -0.0186512, -0.0159343, -0.0185975, -0.0155899, -0.0178921,
            -0.018329
        ])
        yTrain_sdev = np.asarray([
            0.00131249, 0.00104324, 0.00085491, 0.00099751, 0.00094231,
            0.00102579, 0.0010804, 0.00089567, 0.00081245, 0.0011208,
            0.00110756, 0.00126673, 0.00108875, 0.00145115, 0.00098541,
            0.00130559
        ])

        return qBound, xTrain, yTrain_mean, yTrain_sdev

    #
    #----- SETTINGS
    #options for GPR
    nIter_ = 3000  #number of iterations in optimization of GPR hyperparameters
    lr_ = 0.05  #learning rate in the optimization of the hyperparameters
    convPlot_ = True  #plot convergence of optimization of GPR hyperparameters
    standardizeYTrain_ = True  #standardize the Y training data?
    nTest = [41, 40]  #number of test points in each parameter dimension
    #---------------------------------
    #(1) Generate training data
    qBound, xTrain, yTrain_mean, yTrain_sdev = trainData()
    p = len(qBound)  #dimension of the input
    nSamp = len(yTrain_mean)

    #(2) Generate noisy training data
    noise_ = np.random.randn(nSamp)
    noise_ = yTrain_sdev * noise_
    yTrain = yTrain_mean + noise_

    #(3) Create test points
    xTestList = []
    for i in range(p):
        grid_ = np.linspace(qBound[i][0], qBound[i][1], nTest[i])
        xTestList.append(grid_)
    xTest = reshaper.vecs2grid(xTestList)

    #(4) Fit the GPR
    gprOpts = {
        'nIter': nIter_,
        'lr': lr_,
        'convPlot': convPlot_,
        'standardizeYTrain': standardizeYTrain_
    }
    gpr_ = gpr(xTrain, yTrain[:, None], yTrain_sdev, xTest, gprOpts)
    post_f = gpr_.post_f
    post_obs = gpr_.post_y
    #(5) Predicted mean and variance of the posteriors at the test grid
    shift_ = 0.0  #default shift and scaling, assuming no standardization in the training data
    scale_ = 1.0
    if standardizeYTrain_:
        shift_ = gpr_.shift[0]  #0: single-response
        scale_ = gpr_.scale[0]

    fP_ = gprPost(post_f, nTest, shift=shift_, scale=scale_)
    fP_.torchPost()
    post_f_mean = fP_.mean
    post_f_sdev = fP_.sdev
    obsP_ = gprPost(post_obs, nTest, shift=shift_, scale=scale_)
    obsP_.torchPost()
    post_obs_mean = obsP_.mean
    post_obs_sdev = obsP_.sdev

    # (6) Plots
    # When using 'torch2d_2dcont', de-standardization of the predicted mean and sdev is manual
    pltOpts = {
        'title': 'Mean of posterior f(q)',
        'xlab': r'$q_1$',
        'ylab': r'$q_2$'
    }
    gprPlot(pltOpts).torch2d_2dcont(xTrain, xTestList,
                                    post_f_mean * scale_ + shift_)
    pltOpts = {
        'title': 'Sdev of posterior f(q)',
        'xlab': r'$q_1$',
        'ylab': r'$q_2$'
    }
    gprPlot(pltOpts).torch2d_2dcont(xTrain, xTestList, post_f_sdev * scale_)
    # When using torch2d_3dSurf, the optional arguments shift and scale can be passed.
    # Therefore, de-standardization of the predictions is automatic.
    pltOpts = {
        'title': 'Mean of posterior f(q)',
        'xlab': r'$q_1$',
        'ylab': r'$q_2$'
    }
    gprPlot(pltOpts).torch2d_3dSurf(xTrain,
                                    yTrain,
                                    xTestList,
                                    post_f,
                                    shift=shift_,
                                    scale=scale_)
    pltOpts = {
        'title': 'Mean of posterior predictive y=f(q)+e',
        'xlab': r'$q_1$',
        'ylab': r'$q_2$'
    }
    gprPlot(pltOpts).torch2d_3dSurf(xTrain,
                                    yTrain,
                                    xTestList,
                                    post_obs,
                                    shift=shift_,
                                    scale=scale_)
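A minimal sketch of the standardization logic assumed behind `standardizeYTrain`: the GPR is fitted on (y - shift)/scale, and predictions are mapped back with mean*scale + shift and sdev*scale, as done for `post_f_mean` and `post_f_sdev` above:

import numpy as np

y = np.array([-0.017, -0.019, -0.0167, -0.0172])
shift, scale = y.mean(), y.std()
y_std = (y - shift) / scale      # what the GPR is assumed to be trained on
y_back = y_std * scale + shift   # de-standardized prediction
print(np.allclose(y_back, y))    # True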
Example #15
def pce_3d_test():
    """
    Test PCE for 3D uncertain parameter
    """
    #----- SETTINGS------------
    distType = ['Unif', 'Unif', 'Unif']  #distribution type of the parameters
    qInfo = [
        [-0.75, 1.5],  #range of parameters
        [-0.5, 2.5],
        [1.0, 3.0]
    ]
    nQ = [6, 5, 4]  #number of parameter samples in the 3 dimensions
    funOpt = {'a': 7, 'b': 0.1}  #parameters in the Ishigami function
    #PCE options
    truncMethod = 'TO'  #'TP'=Tensor Product
    #'TO'=Total Order
    sampleType = 'GQ'  #'GQ'=Gauss Quadrature nodes
    #other types: see trainSample in sampling.py
    pceSolveMethod = 'Regression'  #'Regression': for any combination of sample points and truncation methods
    #'Projection': only for 'GQ'+'TP'
    nTest = [5, 4, 3]  #number of test samples for the parameters
    if truncMethod == 'TO':
        LMax = 10  #max polynomial order in each parameter direction
    #--------------------
    p = len(distType)
    #Assemble the pceDict
    pceDict = {
        'p': p,
        'truncMethod': truncMethod,
        'sampleType': sampleType,
        'pceSolveMethod': pceSolveMethod,
        'distType': distType
    }
    if truncMethod == 'TO':
        pceDict.update({'LMax': LMax})
    #Generate training data
    xi = []
    q = []
    qBound = []
    for i in range(p):
        samps = sampling.trainSample(sampleType=sampleType,
                                     GQdistType=distType[i],
                                     qInfo=qInfo[i],
                                     nSamp=nQ[i])
        xi.append(samps.xi)
        q.append(samps.q)
        qBound.append(samps.qBound)
    fEx = analyticTestFuncs.fEx3D(q[0], q[1], q[2], 'Ishigami', 'tensorProd',
                                  funOpt)
    fVal = fEx.val
    #Construct the PCE
    xiGrid = reshaper.vecs2grid(xi)
    pce_ = pce(fVal=fVal, xi=xiGrid, pceDict=pceDict, nQList=nQ)
    fMean = pce_.fMean
    fVar = pce_.fVar
    pceCoefs = pce_.coefs
    kSet = pce_.kSet
    #Convergence of the PCE terms
    convPlot(coefs=pceCoefs, distType=distType, kSet=kSet)
    #Exact moments of the Ishigami function
    fEx.moments(qInfo=qBound)
    m = fEx.mean
    v = fEx.var
    #Compare the moments estimated by PCE with the exact analytical values
    print(writeUQ.printRepeated('-', 50))
    print('\t\t Exact \t\t PCE')
    print('E[f]:  ', m, fMean)
    print('V[f]:  ', v, fVar)
    print(writeUQ.printRepeated('-', 50))
    #Compare the PCE predictions at test points with the exact values of the model response
    qTest = []
    xiTest = []
    for i in range(p):
        testSamps = sampling.testSample('unifSpaced',
                                        GQdistType=distType[i],
                                        qInfo=qInfo[i],
                                        qBound=qBound[i],
                                        nSamp=nTest[i])
        qTest.append(testSamps.q)
        xiTest.append(testSamps.xi)
    fVal_test_ex = analyticTestFuncs.fEx3D(qTest[0], qTest[1], qTest[2],
                                           'Ishigami', 'tensorProd',
                                           funOpt).val
    #PCE prediction at test points
    pcePred_ = pceEval(coefs=pceCoefs, xi=xiTest, distType=distType, kSet=kSet)
    fVal_test_pce = pcePred_.pceVal
    #Plot the exact and PCE response values
    nTest_ = np.prod(np.asarray(nTest))
    fVal_test_pce_ = fVal_test_pce.reshape(nTest_, order='F')
    err = np.linalg.norm(fVal_test_pce_ - fVal_test_ex)
    plt.figure(figsize=(10, 4))
    plt.plot(fVal_test_pce_, '-ob', mfc='none', ms=5, label='PCE')
    plt.plot(fVal_test_ex, '-xr', ms=5, label='Exact')
    plt.xlabel('Index of test samples, k')
    plt.ylabel('Model response')
    plt.legend(loc='best')
    plt.grid(alpha=0.4)
    plt.show()
    print('||fEx(q)-fPCE(q)|| % = ', err * 100)
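For reference, the Ishigami function used above has closed-form moments on the standard domain q_i ~ U(-pi, pi) (note this test uses different `qInfo` ranges, for which `fEx.moments` supplies the exact values):

import numpy as np

def ishigami(q1, q2, q3, a=7.0, b=0.1):
    return np.sin(q1) + a * np.sin(q2)**2 + b * q3**4 * np.sin(q1)

a, b = 7.0, 0.1
mean_ex = a / 2.0                                               # E[f] = 3.5
var_ex = a**2/8.0 + b*np.pi**4/5.0 + b**2*np.pi**8/18.0 + 0.5   # V[f] ~ 13.84
q = np.random.default_rng(0).uniform(-np.pi, np.pi, (3, 200_000))
f = ishigami(*q)                                                # Monte Carlo check
print(mean_ex, f.mean())
print(var_ex, f.var())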
Example #16
def pce_2d_test():
    """
    Test PCE for 2D uncertain parameter
    """
    #---- SETTINGS------------
    #Parameters specifications
    distType = ['Norm', 'Norm']  #distribution type of the parameters q1, q2
    qInfo = [
        [-2, 1],  #info on parameters
        [-2, 0.4]
    ]
    nQ = [7, 6]  #number of training samples of parameters
    nTest = [121,
             120]  #number of test points in parameter spaces to evaluate PCE
    #PCE Options
    truncMethod = 'TO'  #'TP'=Tensor Product
    #'TO'=Total Order
    sampleType = [
        'GQ', 'GQ'
    ]  #'GQ'=Gauss Quadrature nodes ('Projection' or 'Regression')
    #For other type of samples, see sampling.py, trainSample => only 'Regression' can be used
    #'LHS': Latin Hypercube Sampling (only when all distType='Unif')
    fType = 'type1'  #Type of the exact model response, 'type1', 'type2', 'type3', 'Rosenbrock'
    pceSolveMethod = 'Regression'  #'Regression': for any combination of sampling and truncation methods
    #'Projection': only for 'GQ'+'TP'
    if truncMethod == 'TO':
        LMax = 8  #max polynomial order in each parameter dimension
    #------------------------
    p = len(distType)
    #Assemble the pceDict
    pceDict = {
        'p': p,
        'truncMethod': truncMethod,
        'sampleType': sampleType,
        'pceSolveMethod': pceSolveMethod,
        'distType': distType
    }
    if truncMethod == 'TO':
        pceDict.update({'LMax': LMax, 'pceSolveMethod': 'Regression'})
    #Generate the training data
    xi = []
    q = []
    qBound = []
    if sampleType[0] == 'LHS' and sampleType[1] == 'LHS':
        if distType == ['Unif'] * p:
            qBound = qInfo
            xi = sampling.LHS_sampling(nQ[0] * nQ[1], [[-1, 1]] * p)
            for i in range(p):
                q.append(pce.mapFromUnit(xi[:, i], qBound[i]))
            fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fType, 'comp')
            xiGrid = xi
        else:
            raise ValueError(
                "LHS works only when all q have 'Unif' distribution.")
    else:
        for i in range(p):
            samps = sampling.trainSample(sampleType=sampleType[i],
                                         GQdistType=distType[i],
                                         qInfo=qInfo[i],
                                         nSamp=nQ[i])
            q.append(samps.q)
            xi.append(samps.xi)
            qBound.append(samps.qBound)
        fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fType, 'tensorProd')
        xiGrid = reshaper.vecs2grid(xi)
    fVal = fEx_.val
    #Construct the PCE
    pce_ = pce(fVal=fVal, xi=xiGrid, pceDict=pceDict, nQList=nQ)
    fMean = pce_.fMean
    fVar = pce_.fVar
    pceCoefs = pce_.coefs
    kSet = pce_.kSet
    #Plot the convergence indicator of the PCE
    convPlot(coefs=pceCoefs, distType=distType, kSet=kSet)
    #Generate test samples for the parameters and evaluate the exact response surface at them
    qTest = []
    xiTest = []
    for i in range(p):
        testSamps = sampling.testSample('unifSpaced',
                                        GQdistType=distType[i],
                                        qInfo=qInfo[i],
                                        qBound=qBound[i],
                                        nSamp=nTest[i])
        qTest_ = testSamps.q
        xiTest_ = testSamps.xi
        qTest.append(qTest_)
        xiTest.append(xiTest_)
    fTest = analyticTestFuncs.fEx2D(qTest[0], qTest[1], fType,
                                    'tensorProd').val
    #Evaluate PCE at the test samples
    pcePred_ = pceEval(coefs=pceCoefs, xi=xiTest, distType=distType, kSet=kSet)
    fPCE = pcePred_.pceVal
    #Use the MC method to directly estimate reference values for the mean and variance of f(q)
    fEx_.moments(distType, qInfo)
    fMean_mc = fEx_.mean
    fVar_mc = fEx_.var

    #Compare the PCE estimates for moments of f(q) with the reference values from MC
    print(writeUQ.printRepeated('-', 70))
    print('------------ MC -------- PCE --------- Error % ')
    print('Mean of f(q) = %g\t%g\t%g' % (fMean_mc, fMean,
                                         (fMean - fMean_mc) / fMean_mc * 100.))
    print('Var  of f(q) = %g\t%g\t%g' % (fVar_mc, fVar,
                                         (fVar - fVar_mc) / fVar_mc * 100.))
    print(writeUQ.printRepeated('-', 70))
    #Plot the exact and PCE response surfaces as contours in the parameters space
    # Create 2D grid from the test samples and plot the contours of response surface over it
    fTestGrid = fTest.reshape(nTest, order='F')
    fErrorGrid = (abs(fTestGrid - fPCE))
    # 2D grid from the sampled parameters
    if sampleType[0] == 'LHS' and sampleType[1] == 'LHS':
        qGrid = reshaper.vecsGlue(q[0], q[1])
    else:
        qGrid = reshaper.vecs2grid(q)
    plt.figure(figsize=(21, 8))
    plt.subplot(1, 3, 1)
    ax = plt.gca()
    CS1 = plt.contour(qTest[0], qTest[1], fTestGrid.T, 40)
    plt.clabel(CS1,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.plot(qGrid[:, 0], qGrid[:, 1], 'o', color='r', markersize=7)
    plt.xlabel(r'$q_1$')
    plt.ylabel(r'$q_2$')
    plt.title('Exact Response')
    plt.subplot(1, 3, 2)
    ax = plt.gca()
    CS2 = plt.contour(qTest[0], qTest[1], fPCE.T, 40)
    plt.clabel(CS2,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.plot(qGrid[:, 0], qGrid[:, 1], 'o', color='r', markersize=7)
    plt.xlabel(r'$q_1$')
    plt.ylabel(r'$q_2$')
    plt.title('PCE Response')
    plt.subplot(1, 3, 3)
    ax = plt.gca()
    CS3 = plt.contour(qTest[0], qTest[1], fErrorGrid.T, 40)
    plt.clabel(CS3,
               inline=True,
               fontsize=13,
               colors='k',
               fmt='%0.2f',
               rightside_up=True,
               manual=False)
    plt.xlabel(r'$q_1$')
    plt.ylabel(r'$q_2$')
    plt.plot(qGrid[:, 0], qGrid[:, 1], 'o', color='r', markersize=7)
    plt.title('|Exact-Surrogate|')
    plt.show()
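For 'Norm' parameters, `qInfo=[mu, sigma]` is assumed to parameterize q = mu + sigma*xi with xi ~ N(0,1), so the 'GQ' training nodes are probabilists' Gauss-Hermite points. A 1D sanity check of that quadrature rule:

import numpy as np
from numpy.polynomial import hermite_e

mu, sigma = -2.0, 1.0                    # qInfo of the first parameter above
xi, w = hermite_e.hermegauss(7)          # weight function exp(-xi^2/2)
w = w / np.sqrt(2.0 * np.pi)             # normalize to the N(0,1) density
q = mu + sigma * xi
print(np.sum(w * q**2))                  # E[q^2] = mu^2 + sigma^2 = 5.0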