def fEx(p, sampleType, n, qInfo, fExName, distType):
    """
    Generate synthetic training data
    """
    # (a) xTrain
    nSamp = n[0] * n[1]
    xi = []
    q = []
    qBound = []
    if sampleType[0] == 'LHS' and sampleType[1] == 'LHS':
        if distType == ['Unif'] * p:
            qBound = qInfo
            xi = sampling.LHS_sampling(nSamp, [[-1, 1]] * p)
            xTrain = np.zeros((nSamp, p))
            for i in range(p):
                xTrain[:, i] = pce.mapFromUnit(xi[:, i], qBound[i])
            fEx_ = analyticTestFuncs.fEx2D(xTrain[:, 0], xTrain[:, 1], fExName, 'comp')
        else:
            raise ValueError("LHS works only when all q have 'Unif' distribution.")
    else:
        for i in range(p):
            samps = sampling.trainSample(sampleType=sampleType[i], GQdistType=distType[i],
                                         qInfo=qInfo[i], nSamp=n[i])
            q.append(samps.q)
        xTrain = reshaper.vecs2grid(q)
        fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fExName, 'tensorProd')
    return xTrain, fEx_
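# ---------------------------------------------------------------------------
# Note: in the LHS branch above, pce.mapFromUnit() maps the samples xi drawn
# on [-1, 1] to the admissible range qBound[i]. For reference, the sketch
# below re-implements that affine map in plain numpy; map_from_unit is a
# hypothetical standalone helper for illustration, not UQit's implementation.
# ---------------------------------------------------------------------------
def map_from_unit(xi, q_bound):
    """Affine map of samples xi in [-1, 1] to the interval q_bound=[a, b]."""
    a, b = q_bound
    return a + 0.5 * (xi + 1.0) * (b - a)

# Example: map 5 uniform samples from [-1, 1] to [-2, 3]
# print(map_from_unit(np.random.uniform(-1, 1, size=5), [-2, 3]))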
def lagInt_Quads2Line_test():
    """
    Test lagInt_Quads2Line().
    The test samples of (q1,q2) are generated along a line q2=a*q1+b defined in the
    admissible space of q1-q2. The training samples are drawn in the usual way,
    covering the admissible space of q1-q2.
    """
    #----- SETTINGS --------------------------------------------------------------
    nNodes = [9, 9]           # number of training samples for q1, q2
    sampType = ['GLL',        # type of training samples for q1, q2
                'unifSpaced']
    qBound = [[-0.75, 1.5],   # admissible range of q1, q2
              [-0.8, 2.5]]    # note that the line should be confined to this space
    lineDef = {'start': [1.4, 2.3],   # coordinates of the line's start point in the q1-q2 plane
               'end': [-0.7, -0.2],   # coordinates of the line's end point in the q1-q2 plane
               'noPtsLine': 100}      # number of test samples along the line
    #-----------------------------------------------------------------------------
    p = len(nNodes)
    # Generate the training samples
    qNodes = []
    for i in range(p):
        qNodes_ = sampling.trainSample(sampleType=sampType[i], qInfo=qBound[i], nSamp=nNodes[i])
        qNodes.append(qNodes_.q)
    # Evaluate the simulator at the training samples
    fNodes = analyticTestFuncs.fEx2D(qNodes[0], qNodes[1], 'type1', 'tensorProd').val
    # Construct the Lagrange interpolation and evaluate it at the test points on the line
    qLine, fLine = lagInt_Quads2Line(fNodes, qNodes, lineDef)
    # Plots
    plt.figure(figsize=(8, 5))
    plt.plot(qLine[0], fLine, '-ob', mfc='none', label='Lagrange Interpolation')
    fLine_ex = analyticTestFuncs.fEx2D(qLine[0], qLine[1], 'type1', 'comp').val   # exact response
    plt.plot(qLine[0], fLine_ex, '-xr', label='Exact Value')
    plt.xlabel(r'$q_1$', fontsize=16)
    plt.ylabel('Response', fontsize=14)
    plt.legend(loc='best')
    plt.grid(alpha=0.4)
    plt.show()
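# ---------------------------------------------------------------------------
# Background: lagInt_Quads2Line() evaluates a tensor-product Lagrange
# interpolant at points along a line in the q1-q2 plane. As a minimal sketch
# of the underlying 1D construction (not UQit's lagInt implementation), the
# hypothetical helper below evaluates the Lagrange interpolant of the data
# (q_nodes, f_nodes) at arbitrary test points, assuming numpy as np.
# ---------------------------------------------------------------------------
def lagrange_interp_1d(q_nodes, f_nodes, q_test):
    """Evaluate the 1D Lagrange interpolant of (q_nodes, f_nodes) at q_test."""
    f_test = np.zeros_like(q_test, dtype=float)
    n = len(q_nodes)
    for k in range(n):
        # k-th Lagrange basis polynomial L_k(q): 1 at q_nodes[k], 0 at the other nodes
        L_k = np.ones_like(q_test, dtype=float)
        for j in range(n):
            if j != k:
                L_k *= (q_test - q_nodes[j]) / (q_nodes[k] - q_nodes[j])
        f_test += f_nodes[k] * L_k
    return f_test

# Example: interpolate sin(pi*q) from 7 Chebyshev-type nodes on [-1, 1]
# q_nodes = np.cos(np.pi * np.arange(7) / 6)
# print(lagrange_interp_1d(q_nodes, np.sin(np.pi * q_nodes), np.linspace(-1, 1, 5)))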
def noiseGen(n, noiseType, xTrain, fExName):
    """
    Generate a 1D numpy array of the standard deviations of the observation noise
    """
    if noiseType == 'homo':     # homoscedastic noise
        sd = 0.2   # noise standard deviation (non-zero, to avoid instabilities)
        sdV = sd * np.ones(n)
    elif noiseType == 'hetero':   # heteroscedastic noise
        sdV = 0.1 * (analyticTestFuncs.fEx2D(xTrain[:, 0], xTrain[:, 1], fExName, 'comp').val + 0.001)
    return sdV
def trainDataGen(p, sampleType, n, qBound, fExName, noiseType):
    """
    Generate Training Data
    """
    # (a) xTrain
    if sampleType == 'grid':
        nSamp = n[0] * n[1]
        gridList = []
        for i in range(p):
            grid_ = np.linspace(qBound[i][0], qBound[i][1], n[i])
            gridList.append(grid_)
        xTrain = reshaper.vecs2grid(gridList)
    elif sampleType == 'random':
        nSamp = n   # number of random samples
        xTrain = sampling.LHS_sampling(n, qBound)
    # (b) Observation noise
    noiseSdev = noiseGen(nSamp, noiseType, xTrain, fExName)
    # (c) Training response
    yTrain = analyticTestFuncs.fEx2D(xTrain[:, 0], xTrain[:, 1], fExName, 'comp').val
    yTrain_noiseFree = yTrain
    yTrain = yTrain_noiseFree + noiseSdev * np.random.randn(nSamp)
    return xTrain, yTrain, noiseSdev, yTrain_noiseFree
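# ---------------------------------------------------------------------------
# For intuition about the two noise models used by noiseGen(): a homoscedastic
# model adds noise with a constant standard deviation, while a heteroscedastic
# model lets the noise level vary with the response. A minimal numpy sketch
# follows (the response f below is a stand-in, not fEx2D; note the abs(),
# since a standard deviation must be non-negative, whereas noiseGen scales
# the raw response value directly).
# ---------------------------------------------------------------------------
def _noise_model_demo():
    """Hypothetical demo contrasting homoscedastic and heteroscedastic noise."""
    rng = np.random.default_rng(42)
    f = np.sin(np.linspace(0, 2 * np.pi, 50))       # stand-in noise-free response
    sd_homo = 0.2 * np.ones_like(f)                 # constant noise level
    sd_hetero = 0.1 * (np.abs(f) + 0.001)           # noise level follows the response
    y_homo = f + sd_homo * rng.standard_normal(f.size)
    y_hetero = f + sd_hetero * rng.standard_normal(f.size)
    return y_homo, y_hetero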
def lagInt_2d_test():
    """
    Test Lagrange interpolation over a 2D parameter space.
    """
    #----- SETTINGS --------------------------------------------------------------
    nNodes = [5, 4]           # number of training nodes in the space of parameters q1, q2
    sampType = ['GLL',        # method of drawing samples for q1, q2
                'unifSpaced']
    qBound = [[-0.75, 1.5],   # admissible range of the parameters
              [-0.5, 2.5]]
    # Settings for the exact response surface
    domRange = [[-2, 2],      # domain range for q1, q2
                [-3, 3]]
    nTest = [100, 101]        # number of test samples
    #-----------------------------------------------------------------------------
    p = len(nNodes)
    # Create the training samples over each parameter space
    qNodes = []
    for i in range(p):
        qNodes_ = sampling.trainSample(sampleType=sampType[i], qInfo=qBound[i], nSamp=nNodes[i])
        qNodes.append(qNodes_.q)
    # Evaluate the simulator at each joint sample
    fNodes = analyticTestFuncs.fEx2D(qNodes[0], qNodes[1], 'type1', 'tensorProd').val
    # Generate the test samples
    qTestList = []
    for i in range(p):
        qTest_ = sampling.testSample(sampleType='unifSpaced', qBound=qBound[i], nSamp=nTest[i])
        qTestList.append(qTest_.q)
    # Construct the Lagrange interpolation and evaluate it at the test samples
    fTest = lagInt(fNodes=fNodes, qNodes=qNodes, qTest=qTestList,
                   liDict={'testRule': 'tensorProd'}).val
    # Evaluate the exact model response over domRange
    qTestFull = []
    for i in range(p):
        qTestFull_ = np.linspace(domRange[i][0], domRange[i][1], nTest[i])
        qTestFull.append(qTestFull_)
    fTestFull = analyticTestFuncs.fEx2D(qTestFull[0], qTestFull[1], 'type1', 'tensorProd').val
    fTestFullGrid = fTestFull.reshape((nTest[0], nTest[1]), order='F').T
    fTestGrid = fTest.reshape((nTest[0], nTest[1]), order='F').T
    # Plots
    plt.figure(figsize=(16, 8))
    plt.subplot(1, 2, 1)
    ax = plt.gca()
    CS1 = plt.contour(qTestFull[0], qTestFull[1], fTestFullGrid, 35)
    plt.clabel(CS1, inline=True, fontsize=15, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
    qNodesGrid = reshaper.vecs2grid(qNodes)
    plt.plot(qNodesGrid[:, 0], qNodesGrid[:, 1], 'o', color='r', markersize=6)
    plt.xlabel(r'$q_1$', fontsize=25)
    plt.ylabel(r'$q_2$', fontsize=25)
    plt.xticks(fontsize=17)
    plt.yticks(fontsize=17)
    plt.title('Exact Response Surface')
    plt.subplot(1, 2, 2)
    ax = plt.gca()
    CS2 = plt.contour(qTestList[0], qTestList[1], fTestGrid, 20)
    plt.clabel(CS2, inline=True, fontsize=15, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
    plt.plot(qNodesGrid[:, 0], qNodesGrid[:, 1], 'o', color='r', markersize=6)
    plt.xlabel(r'$q_1$', fontsize=25)
    plt.ylabel(r'$q_2$', fontsize=25)
    plt.xticks(fontsize=17)
    plt.yticks(fontsize=17)
    plt.title('Response Surface by Lagrange Interpolation')
    plt.xlim(domRange[0])
    plt.ylim(domRange[1])
    plt.show()
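# ---------------------------------------------------------------------------
# Background for the 'tensorProd' rule used above: the 2D interpolant is built
# from products of 1D Lagrange basis polynomials, so evaluating it on a test
# grid reduces to two matrix products. A minimal numpy sketch under that
# assumption (hypothetical helpers, not UQit's lagInt):
# ---------------------------------------------------------------------------
def lagrange_basis_matrix(q_nodes, q_test):
    """Matrix L with L[i, k] = k-th Lagrange basis polynomial at q_test[i]."""
    n = len(q_nodes)
    L = np.ones((len(q_test), n))
    for k in range(n):
        for j in range(n):
            if j != k:
                L[:, k] *= (q_test - q_nodes[j]) / (q_nodes[k] - q_nodes[j])
    return L

def lagrange_interp_2d_tensorprod(f_nodes, q_nodes, q_test):
    """Tensor-product 2D Lagrange interpolation.
    f_nodes: (n1, n2) responses on the grid q_nodes[0] x q_nodes[1];
    returns the (m1, m2) interpolant values on the grid q_test[0] x q_test[1]."""
    L1 = lagrange_basis_matrix(q_nodes[0], q_test[0])   # (m1, n1)
    L2 = lagrange_basis_matrix(q_nodes[1], q_test[1])   # (m2, n2)
    return L1 @ f_nodes @ L2.T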
def lagIntAtGQs_2d_test():
    """
    Test lagIntAtGQs(...) for a 2D uncertain parameter space
    """
    #------ SETTINGS ----------------------------------------------------
    # Space 1
    nSamp1 = [6, 10]            # number of samples in PCE1, parameters 1, 2
    space1 = [[-2, 1.5],        # admissible space of PCE1 (both parameters)
              [-3, 2.5]]
    sampleType1 = ['GLL', 'unifRand']   # see trainSample class in sampling.py
    # Space 2
    nSamp2 = [4, 5]             # number of samples in PCE2, parameters 1, 2
    space2 = [[-0.5, 1],        # admissible space of PCE2 (both parameters)
              [-2., 1.5]]
    # Test samples
    nTest = [100, 101]          # number of test samples for parameters 1, 2
    # Model function
    fType = 'type1'             # type of simulator
    #---------------------------------------------------------------------
    p = 2
    distType2 = ['Unif', 'Unif']
    # (1) Generate samples from space 1
    q1 = []
    for i in range(p):
        q1_ = sampling.trainSample(sampleType=sampleType1[i], qInfo=space1[i], nSamp=nSamp1[i])
        # Correction for uniform samples (otherwise the contours are not plotted properly)
        space1[i] = [min(q1_.q), max(q1_.q)]
        q1.append(q1_.q)
    # Response values at the samples of space 1
    fVal1 = analyticTestFuncs.fEx2D(q1[0], q1[1], fType, 'tensorProd').val
    # (2) Lagrange interpolation from the samples of space 1 to the GQ nodes of space 2
    q2, xi2, fVal2 = lagIntAtGQs(fVal1, q1, space1, nSamp2, space2, distType2)
    # (3) Construct a PCE over space 2
    pceDict = {'p': p, 'sampleType': 'GQ', 'pceSolveMethod': 'Projection',
               'truncMethod': 'TP', 'distType': distType2}
    pce2 = pce(fVal=fVal2, xi=xi2, pceDict=pceDict, nQList=nSamp2)
    # (4) Evaluate the surrogates: Lagrange interpolation over space 1,
    #     PCE over space 2
    # Test samples
    qTest1 = []
    xiTest2 = []
    qTest2 = []
    for i in range(p):
        testSamps1 = sampling.testSample('unifSpaced', qBound=space1[i], nSamp=nTest[i])
        qTest1.append(testSamps1.q)
        testSamps2 = sampling.testSample('unifSpaced', GQdistType=distType2[i],
                                         qBound=space2[i], nSamp=nTest[i])
        xiTest2.append(testSamps2.xi)
        qTest2.append(testSamps2.q)
    # Evaluation
    # Space 1
    fTest1_ex = analyticTestFuncs.fEx2D(qTest1[0], qTest1[1], fType, 'tensorProd').val
    fTest1 = lagInt(fNodes=fVal1, qNodes=q1, qTest=qTest1, liDict={'testRule': 'tensorProd'}).val
    # Space 2
    pceEval2 = pceEval(coefs=pce2.coefs, xi=xiTest2, distType=distType2, kSet=pce2.kSet)
    fTest2 = pceEval2.pceVal
    # (5) 2D contour plots
    plt.figure(figsize=(20, 8))
    plt.subplot(1, 3, 1)
    ax = plt.gca()
    fTest_Grid = fTest1_ex.reshape(nTest, order='F').T
    CS1 = plt.contour(qTest1[0], qTest1[1], fTest_Grid, 35)
    plt.clabel(CS1, inline=True, fontsize=13, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
    plt.xlabel('q1')
    plt.ylabel('q2')
    plt.title('Exact response surface over space 1')
    #
    plt.subplot(1, 3, 2)
    ax = plt.gca()
    fTest1_Grid = fTest1.reshape(nTest, order='F').T
    CS2 = plt.contour(qTest1[0], qTest1[1], fTest1_Grid, 35)
    plt.clabel(CS2, inline=True, fontsize=13, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
    q1Grid = reshaper.vecs2grid(q1)
    plt.plot(q1Grid[:, 0], q1Grid[:, 1], 'ob', markersize=6)
    q2_ = reshaper.vecs2grid(q2)
    plt.plot(q2_[:, 0], q2_[:, 1], 'sr', markersize=6)
    plt.xlabel('q1')
    plt.ylabel('q2')
    plt.title('Response surface by Lagrange Int.\n over space-1 using blue circles')
    #
    plt.subplot(1, 3, 3)
    ax = plt.gca()
    fTest2_Grid = fTest2.reshape(nTest, order='F').T
    CS3 = plt.contour(qTest2[0], qTest2[1], fTest2_Grid, 20)
    plt.clabel(CS3, inline=True, fontsize=13, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
    plt.plot(q2_[:, 0], q2_[:, 1], 'sr', markersize=6)
    plt.xlabel('q1')
    plt.ylabel('q2')
    plt.title('Response surface by PCE over space-2 \n using red squares')
    plt.xlim(space1[0][:])
    plt.ylim(space1[1][:])
    plt.show()
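# ---------------------------------------------------------------------------
# Background: for distType2=['Unif','Unif'], the GQ nodes that lagIntAtGQs()
# interpolates onto are Gauss-Legendre points. A minimal numpy sketch of
# generating such nodes and mapping them to an interval [a, b], independent
# of UQit's sampling.trainSample:
# ---------------------------------------------------------------------------
def gauss_legendre_nodes(n, a, b):
    """Gauss-Legendre nodes/weights on [-1, 1], with the nodes mapped to [a, b]."""
    xi, w = np.polynomial.legendre.leggauss(n)
    q = a + 0.5 * (xi + 1.0) * (b - a)
    return q, xi, w

# Quadrature sanity check: integral of q**2 over [a, b]
# q, xi, w = gauss_legendre_nodes(5, -0.5, 1.0)
# print(0.5 * (1.0 - (-0.5)) * np.sum(w * q**2), (1.0**3 - (-0.5)**3) / 3.0)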
def gprTorch_2d_singleTask_test():
    """
    Test GPR for 2D input
    """
    ##
    def plot_trainData(n, fSamples, noiseSdev, yTrain):
        """
        Plot the noisy training data which are used in GPR.
        """
        plt.figure(figsize=(10, 5))
        x_ = np.zeros(n)
        for i in range(n):
            x_[i] = i + 1
        for i in range(500):   # only for plotting possible realizations
            noise_ = noiseSdev * np.random.randn(n)
            plt.plot(x_, fSamples + noise_, '.', color='steelblue', alpha=0.4, markersize=1)
        plt.errorbar(x_, fSamples, yerr=1.96 * abs(noiseSdev), ls='none', capsize=5,
                     ecolor='k', elinewidth=4, label=r'$95\%$ CI in Obs.')
        plt.plot(x_, fSamples, 'o', markersize=6, markerfacecolor='lime',
                 markeredgecolor='salmon', label='Mean Observation')
        plt.plot(x_, yTrain, 'xr', markersize=6, label='Sample Observation')
        plt.legend(loc='best', fontsize=15)
        plt.ylabel('QoI', fontsize=17)
        plt.xlabel('Simulation Index', fontsize=17)
        plt.xticks(fontsize=15)
        plt.yticks(fontsize=15)
        plt.title('Training data with associated confidence')
        plt.show()
    ##
    def trainDataGen(p, sampleType, n, qBound, fExName, noiseType):
        """
        Generate Training Data
        """
        # (a) xTrain
        if sampleType == 'grid':
            nSamp = n[0] * n[1]
            gridList = []
            for i in range(p):
                grid_ = np.linspace(qBound[i][0], qBound[i][1], n[i])
                gridList.append(grid_)
            xTrain = reshaper.vecs2grid(gridList)
        elif sampleType == 'random':
            nSamp = n   # number of random samples
            xTrain = sampling.LHS_sampling(n, qBound)
        # (b) Observation noise
        noiseSdev = noiseGen(nSamp, noiseType, xTrain, fExName)
        # (c) Training response
        yTrain = analyticTestFuncs.fEx2D(xTrain[:, 0], xTrain[:, 1], fExName, 'comp').val
        yTrain_noiseFree = yTrain
        yTrain = yTrain_noiseFree + noiseSdev * np.random.randn(nSamp)
        return xTrain, yTrain, noiseSdev, yTrain_noiseFree
    ##
    def noiseGen(n, noiseType, xTrain, fExName):
        """
        Generate a 1D numpy array of the standard deviations of the observation noise
        """
        if noiseType == 'homo':     # homoscedastic noise
            sd = 0.2   # noise standard deviation (non-zero, to avoid instabilities)
            sdV = sd * np.ones(n)
        elif noiseType == 'hetero':   # heteroscedastic noise
            sdV = 0.1 * (analyticTestFuncs.fEx2D(xTrain[:, 0], xTrain[:, 1], fExName, 'comp').val + 0.001)
        return sdV
    #
    #----- SETTINGS
    qBound = [[-2, 2], [-2, 2]]   # admissible range of the parameters
    fExName = 'type1'             # type of simulator in analyticTestFuncs.fEx2D:
                                  # 'type1', 'type2', 'type3', 'Rosenbrock'
    sampleType = 'random'         # 'random' or 'grid': type of training samples
    if sampleType == 'grid':
        n = [9, 9]                # number of training samples in each input dimension
    elif sampleType == 'random':
        n = 100                   # total number of training samples drawn randomly
    noiseType = 'hetero'          # noise type: 'homo'=homoscedastic, 'hetero'=heteroscedastic
    # Options for GPR
    nIter_ = 1000                 # number of iterations in the optimization of the GPR hyperparameters
    lr_ = 0.05                    # learning rate in the optimization of the hyperparameters
    convPlot_ = True              # plot the convergence of the optimization of the GPR hyperparameters
    nTest = [21, 20]              # number of test points in each parameter dimension
    #------------------------------------------------
    # (0) Assemble the gprOpts dict
    gprOpts = {'nIter': nIter_, 'lr': lr_, 'convPlot': convPlot_}
    # (1) Generate the training data
    p = len(qBound)   # dimension of the input
    xTrain, yTrain, noiseSdev, yTrain_noiseFree = trainDataGen(p, sampleType, n, qBound, fExName, noiseType)
    nSamp = yTrain.shape[0]
    plot_trainData(nSamp, yTrain_noiseFree, noiseSdev, yTrain)
    # (2) Create the test samples
    xTestList = []
    for i in range(p):
        grid_ = np.linspace(qBound[i][0], qBound[i][1], nTest[i])
        xTestList.append(grid_)
    xTest = reshaper.vecs2grid(xTestList)
    # (3) Construct the GPR based on the training data and make predictions at the test samples
    gpr_ = gpr(xTrain, yTrain[:, None], noiseSdev, xTest, gprOpts)
    post_f = gpr_.post_f
    post_obs = gpr_.post_y
    # Predicted mean and variance of the posteriors at the test grid
    fP_ = gprPost(post_f, nTest)
    fP_.torchPost()
    post_f_mean = fP_.mean
    post_f_sdev = fP_.sdev
    lower_f = fP_.ciL
    upper_f = fP_.ciU
    obsP_ = gprPost(post_obs, nTest)
    obsP_.torchPost()
    post_obs_mean = obsP_.mean
    post_obs_sdev = obsP_.sdev
    lower_obs = obsP_.ciL
    upper_obs = obsP_.ciU
    # Plots
    with torch.no_grad():
        fig = plt.figure(figsize=(16, 4))
        ax = fig.add_subplot(141)
        fEx_test = analyticTestFuncs.fEx2D(xTest[:, 0], xTest[:, 1], fExName, 'comp').val
        CS0 = ax.contour(xTestList[0], xTestList[1],
                         fEx_test.reshape((nTest[0], nTest[1]), order='F').T, levels=40)
        ax.clabel(CS0, inline=True, fontsize=15, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
        ax.plot(xTrain[:, 0], xTrain[:, 1], 'or')
        ax.set_title(r'Exact $f(q)$')
        ax = fig.add_subplot(142)
        CS1 = ax.contour(xTestList[0], xTestList[1], (post_f_mean).T, levels=40)
        ax.clabel(CS1, inline=True, fontsize=15, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
        ax.plot(xTrain[:, 0], xTrain[:, 1], 'or')
        ax.set_title(r'Mean Posterior of $f(q)$')
        ax = fig.add_subplot(143)
        CS2 = ax.contour(xTestList[0], xTestList[1], upper_obs.T, levels=40)
        ax.clabel(CS2, inline=True, fontsize=15, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
        ax.plot(xTrain[:, 0], xTrain[:, 1], 'or')
        ax.set_title(r'Upper Confidence for Observations')
        ax = fig.add_subplot(144)
        CS3 = ax.contour(xTestList[0], xTestList[1], lower_obs.T, levels=40)
        ax.clabel(CS3, inline=True, fontsize=15, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
        ax.plot(xTrain[:, 0], xTrain[:, 1], 'or')
        ax.set_title(r'Lower Confidence for Observations')
        plt.show()
    # 2D contour plot
    pltOpts = {'title': 'Mean posterior of f(q)', 'xlab': r'$q_1$', 'ylab': r'$q_2$'}
    gprPlot(pltOpts).torch2d_2dcont(xTrain, xTestList, post_f_mean)
    # 3D plot
    gprPlot().torch2d_3dSurf(xTrain, yTrain, xTestList, post_obs)
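# ---------------------------------------------------------------------------
# For intuition about what the gpr wrapper returns: with a fixed kernel and a
# known noise level, the GP posterior has a closed form. The sketch below is
# a plain-numpy version with assumed (not fitted) hyperparameters ell and sf;
# the test above instead optimizes the GPyTorch hyperparameters.
# ---------------------------------------------------------------------------
def gp_posterior_numpy(X, y, sd_noise, Xs, ell=0.5, sf=1.0):
    """Exact GP posterior mean/sdev of f at Xs, given noisy observations y at X.
    X: (n, p) training inputs, y: (n,), sd_noise: (n,), Xs: (m, p) test inputs."""
    def k(A, B):
        # squared-exponential (RBF) kernel
        d2 = np.sum((A[:, None, :] - B[None, :, :])**2, axis=-1)
        return sf**2 * np.exp(-0.5 * d2 / ell**2)
    K = k(X, X) + np.diag(sd_noise**2)   # training covariance plus observation noise
    Ks = k(Xs, X)
    mean = Ks @ np.linalg.solve(K, y)
    cov = k(Xs, Xs) - Ks @ np.linalg.solve(K, Ks.T)
    sdev = np.sqrt(np.clip(np.diag(cov), 0.0, None))
    return mean, sdev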
def sobol_2par_unif_test():
    """
    Test sobol for the case of 2 uncertain parameters q1, q2.
    Sobol indices are computed for f(q1,q2)=q1**2.+q1*q2, which is
    analyticTestFuncs.fEx2D('type3'). The indices are computed with the following methods:
    * Method1: Direct computation by UQit
    * Method2: First a PCE is constructed and then its predictions are used to compute the Sobol indices
    * Method3: Analytical expressions (reference values)
    """
    #--------------------------
    #------- SETTINGS
    n = [101, 100]        # number of samples for q1 and q2, Method1
    qBound = [[-3, 1],    # admissible range of the parameters
              [-1, 2]]
    nQpce = [5, 6]        # number of GQ points for Method2
    #--------------------------
    fType = 'type3'       # type of the analytical function
    p = len(n)
    distType = ['Unif'] * p
    # (1) Samples from the parameter space
    q = []
    pdf = []
    for i in range(p):
        q.append(np.linspace(qBound[i][0], qBound[i][1], n[i]))
        pdf.append(np.ones(n[i]) / (qBound[i][1] - qBound[i][0]))
    # (2) Compute the function values at the parameter samples
    fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fType, 'tensorProd')
    fEx = np.reshape(fEx_.val, n, 'F')
    # (3) Compute the Sobol indices by direct numerical integration
    sobol_ = sobol(q, fEx, pdf)
    Si = sobol_.Si
    STi = sobol_.STi
    Sij = sobol_.Sij
    # (4) Construct a PCE and use its predictions in the numerical integration
    #     for computing the Sobol indices.
    # Generate observations at Gauss-Legendre points
    xi = []
    qpce = []
    for i in range(p):
        samps = sampling.trainSample(sampleType='GQ', GQdistType=distType[i],
                                     qInfo=qBound[i], nSamp=nQpce[i])
        xi.append(samps.xi)
        qpce.append(samps.q)
    fVal_pceCnstrct = analyticTestFuncs.fEx2D(qpce[0], qpce[1], fType, 'tensorProd').val
    # Construct the PCE
    xiGrid = reshaper.vecs2grid(xi)
    pceDict = {'p': 2, 'sampleType': 'GQ', 'truncMethod': 'TP',
               'pceSolveMethod': 'Projection', 'distType': distType}
    pce_ = pce(fVal=fVal_pceCnstrct, nQList=nQpce, xi=xiGrid, pceDict=pceDict)
    # Use the PCE to predict at test samples from the parameter space
    qpceTest = []
    xiTest = []
    for i in range(p):
        testSamps = sampling.testSample('unifSpaced', GQdistType=distType[i],
                                        qBound=qBound[i], nSamp=n[i])
        xiTest.append(testSamps.xi)
        qpceTest.append(testSamps.q)
    fPCETest_ = pceEval(coefs=pce_.coefs, kSet=pce_.kSet, xi=xiTest, distType=distType)
    fPCETest = fPCETest_.pceVal
    # Compute the Sobol indices
    sobolPCE_ = sobol(qpceTest, fPCETest, pdf)
    Si_pce = sobolPCE_.Si
    Sij_pce = sobolPCE_.Sij
    # (5) Exact Sobol indices (analytical expressions)
    if fType == 'type3':
        fEx_.sobol(qBound)
        Si_ex = fEx_.Si
        STi_ex = fEx_.STi
        Sij_ex = fEx_.Sij
    # (6) Results
    print(' > Main indices by UQit:\n\t S1=%g, S2=%g, S12=%g' % (Si[0], Si[1], Sij[0]))
    print(' > Main indices by gPCE+numerical integration:\n\t S1=%g, S2=%g, S12=%g'
          % (Si_pce[0], Si_pce[1], Sij_pce[0]))
    print(' > Analytical reference for the main indices:\n\t S1=%g, S2=%g, S12=%g'
          % (Si_ex[0], Si_ex[1], Sij_ex[0]))
    print(' > Total indices by UQit:\n\t ST1=%g, ST2=%g' % (STi[0], STi[1]))
    print(' > Analytical reference for the total indices:\n\t ST1=%g, ST2=%g' % (STi_ex[0], STi_ex[1]))
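# ---------------------------------------------------------------------------
# An independent cross-check of the integration-based sobol() above: the
# standard pick-freeze Monte Carlo estimators (Saltelli/Jansen) for the
# first-order and total Sobol indices. A minimal numpy sketch; sobol_mc and
# its arguments are hypothetical names, not part of UQit.
# ---------------------------------------------------------------------------
def sobol_mc(f, qBound, nMC=100000, seed=0):
    """Pick-freeze MC estimates of (Si, STi) for f with independent uniform inputs."""
    rng = np.random.default_rng(seed)
    p = len(qBound)
    lo = np.array([b[0] for b in qBound])
    hi = np.array([b[1] for b in qBound])
    A = lo + (hi - lo) * rng.random((nMC, p))
    B = lo + (hi - lo) * rng.random((nMC, p))
    fA, fB = f(A), f(B)
    var = np.var(np.concatenate([fA, fB]))
    Si, STi = np.zeros(p), np.zeros(p)
    for i in range(p):
        ABi = A.copy()
        ABi[:, i] = B[:, i]      # replace only the i-th input, freeze the rest
        fABi = f(ABi)
        Si[i] = np.mean(fB * (fABi - fA)) / var        # Saltelli (2010) estimator
        STi[i] = 0.5 * np.mean((fA - fABi)**2) / var   # Jansen (1999) estimator
    return Si, STi

# Example with the same model and ranges as in sobol_2par_unif_test():
# Si_mc, STi_mc = sobol_mc(lambda Q: Q[:, 0]**2 + Q[:, 0] * Q[:, 1], [[-3, 1], [-1, 2]])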
def pce_2d_test():
    """
    Test PCE for 2D uncertain parameters
    """
    #---- SETTINGS------------
    # Parameter specifications
    distType = ['Norm', 'Norm']   # distribution type of the parameters q1, q2
    qInfo = [[-2, 1],             # info on the parameters
             [-2, 0.4]]
    nQ = [7, 6]                   # number of training samples of the parameters
    nTest = [121, 120]            # number of test points in the parameter spaces to evaluate the PCE
    # PCE options
    truncMethod = 'TO'            # 'TP'=Tensor Product
                                  # 'TO'=Total Order
    sampleType = ['GQ', 'GQ']     # 'GQ'=Gauss quadrature nodes ('Projection' or 'Regression')
                                  # For other types of samples, see sampling.py, trainSample
                                  #   => only 'Regression' can be used
                                  # 'LHS': Latin Hypercube sampling (only when all distType='Unif')
    fType = 'type1'               # type of the exact model response:
                                  # 'type1', 'type2', 'type3', 'Rosenbrock'
    pceSolveMethod = 'Regression' # 'Regression': for any combination of sampling and truncation methods
                                  # 'Projection': only for 'GQ'+'TP'
    if truncMethod == 'TO':
        LMax = 8                  # max polynomial order in each parameter dimension
    #------------------------
    p = len(distType)
    # Assemble the pceDict
    pceDict = {'p': p, 'truncMethod': truncMethod, 'sampleType': sampleType,
               'pceSolveMethod': pceSolveMethod, 'distType': distType}
    if truncMethod == 'TO':
        pceDict.update({'LMax': LMax, 'pceSolveMethod': 'Regression'})
    # Generate the training data
    xi = []
    q = []
    qBound = []
    if sampleType[0] == 'LHS' and sampleType[1] == 'LHS':
        if distType == ['Unif'] * p:
            qBound = qInfo
            xi = sampling.LHS_sampling(nQ[0] * nQ[1], [[-1, 1]] * p)
            for i in range(p):
                q.append(pce.mapFromUnit(xi[:, i], qBound[i]))
            fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fType, 'comp')
            xiGrid = xi
        else:
            raise ValueError("LHS works only when all q have 'Unif' distribution.")
    else:
        for i in range(p):
            samps = sampling.trainSample(sampleType=sampleType[i], GQdistType=distType[i],
                                         qInfo=qInfo[i], nSamp=nQ[i])
            q.append(samps.q)
            xi.append(samps.xi)
            qBound.append(samps.qBound)
        fEx_ = analyticTestFuncs.fEx2D(q[0], q[1], fType, 'tensorProd')
        xiGrid = reshaper.vecs2grid(xi)
    fVal = fEx_.val
    # Construct the PCE
    pce_ = pce(fVal=fVal, xi=xiGrid, pceDict=pceDict, nQList=nQ)
    fMean = pce_.fMean
    fVar = pce_.fVar
    pceCoefs = pce_.coefs
    kSet = pce_.kSet
    # Plot the convergence indicator of the PCE
    convPlot(coefs=pceCoefs, distType=distType, kSet=kSet)
    # Generate test samples for the parameters and evaluate the exact response surface at them
    qTest = []
    xiTest = []
    for i in range(p):
        testSamps = sampling.testSample('unifSpaced', GQdistType=distType[i], qInfo=qInfo[i],
                                        qBound=qBound[i], nSamp=nTest[i])
        qTest_ = testSamps.q
        xiTest_ = testSamps.xi
        qTest.append(qTest_)
        xiTest.append(xiTest_)
    fTest = analyticTestFuncs.fEx2D(qTest[0], qTest[1], fType, 'tensorProd').val
    # Evaluate the PCE at the test samples
    pcePred_ = pceEval(coefs=pceCoefs, xi=xiTest, distType=distType, kSet=kSet)
    fPCE = pcePred_.pceVal
    # Use the MC method to directly estimate reference values for the mean and variance of f(q)
    fEx_.moments(distType, qInfo)
    fMean_mc = fEx_.mean
    fVar_mc = fEx_.var
    # Compare the PCE estimates of the moments of f(q) with the reference MC values
    print(writeUQ.printRepeated('-', 70))
    print('------------ MC -------- PCE --------- Error % ')
    print('Mean of f(q) = %g\t%g\t%g' % (fMean_mc, fMean, (fMean - fMean_mc) / fMean_mc * 100.))
    print('Var  of f(q) = %g\t%g\t%g' % (fVar_mc, fVar, (fVar - fVar_mc) / fVar_mc * 100.))
    print(writeUQ.printRepeated('-', 70))
    # Plot the exact and PCE response surfaces as contours in the parameter space
    # Create a 2D grid from the test samples and plot the contours of the response surfaces over it
    fTestGrid = fTest.reshape(nTest, order='F')
    fErrorGrid = abs(fTestGrid - fPCE)
    # 2D grid from the sampled parameters
    if sampleType[0] == 'LHS' and sampleType[1] == 'LHS':
        qGrid = reshaper.vecsGlue(q[0], q[1])
    else:
        qGrid = reshaper.vecs2grid(q)
    plt.figure(figsize=(21, 8))
    plt.subplot(1, 3, 1)
    ax = plt.gca()
    CS1 = plt.contour(qTest[0], qTest[1], fTestGrid.T, 40)
    plt.clabel(CS1, inline=True, fontsize=13, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
    plt.plot(qGrid[:, 0], qGrid[:, 1], 'o', color='r', markersize=7)
    plt.xlabel(r'$q_1$')
    plt.ylabel(r'$q_2$')
    plt.title('Exact Response')
    plt.subplot(1, 3, 2)
    ax = plt.gca()
    CS2 = plt.contour(qTest[0], qTest[1], fPCE.T, 40)
    plt.clabel(CS2, inline=True, fontsize=13, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
    plt.plot(qGrid[:, 0], qGrid[:, 1], 'o', color='r', markersize=7)
    plt.xlabel(r'$q_1$')
    plt.ylabel(r'$q_2$')
    plt.title('PCE Response')
    plt.subplot(1, 3, 3)
    ax = plt.gca()
    CS3 = plt.contour(qTest[0], qTest[1], fErrorGrid.T, 40)
    plt.clabel(CS3, inline=True, fontsize=13, colors='k', fmt='%0.2f', rightside_up=True, manual=False)
    plt.xlabel(r'$q_1$')
    plt.ylabel(r'$q_2$')
    plt.plot(qGrid[:, 0], qGrid[:, 1], 'o', color='r', markersize=7)
    plt.title('|Exact-Surrogate|')
    plt.show()
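# ---------------------------------------------------------------------------
# Background for the 'Norm' + 'GQ' combination above: in 1D, the projection
# PCE coefficients follow from Gauss-Hermite quadrature against probabilists'
# Hermite polynomials He_k, for which E[He_j He_k] = k! * delta_jk under a
# standard normal. A self-contained numpy sketch (pce_1d_norm is a
# hypothetical helper for illustration, not UQit's pce class):
# ---------------------------------------------------------------------------
import math
from numpy.polynomial.hermite_e import hermegauss, hermeval

def pce_1d_norm(f, mu, sigma, nQuad=10, LMax=6):
    """Projection PCE of f(q) for q ~ N(mu, sigma^2); returns (coefs, mean, var)."""
    xi, w = hermegauss(nQuad)            # nodes/weights for the weight exp(-xi^2/2)
    w = w / np.sqrt(2.0 * np.pi)         # normalize to the standard normal pdf
    fq = f(mu + sigma * xi)              # model evaluated at the mapped nodes
    coefs = np.zeros(LMax + 1)
    for k in range(LMax + 1):
        He_k = hermeval(xi, np.eye(LMax + 1)[k])                # He_k(xi)
        coefs[k] = np.sum(w * fq * He_k) / math.factorial(k)    # <He_k, He_k> = k!
    mean = coefs[0]
    var = sum(coefs[k]**2 * math.factorial(k) for k in range(1, LMax + 1))
    return coefs, mean, var

# Example: f(q) = q**2 + q with q ~ N(-2, 1); exact mean = 3, exact variance = 11
# print(pce_1d_norm(lambda q: q**2 + q, -2.0, 1.0))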