def initialize(self):
    # Need to modify this dictionary when we change the SA constants
    #import pdb; pdb.set_trace()
    #sys.stdout = open(os.devnull, "w")
    self.aoptions = aeroOptions
    self.woptions = warpOptions
    self.ooptions = optOptions
    self.uoptions = uqOptions

    self.Pr = 0.
    self.P = self.uoptions['P']
    self.NS0 = self.uoptions['NS0']

    # Generate FFD and DVs
    if rank == 0:
        rank0dvg = pf.createFFD()
    else:
        rank0dvg = None
    self.DVGeo = comm.bcast(rank0dvg, root=0)

    # starting flat mesh
    meshname = self.aoptions['gridFile']
    gridFile = meshname

    # flow characteristics
    alpha = 0.0
    mach = self.ooptions['mach']  #0.95
    Re = self.ooptions['Re']  #50000
    Re_L = 1.0
    temp = 540
    arearef = 2.0
    chordref = 1.0

    # Spalart Allmaras model constants, to be changed in UQ (4 for now)
    saconstsm = [0.41, 0.1355, 0.622, 0.66666666667]
    self.saconstsb = [7.1, 0.3, 2.0, 1.0, 2.0, 1.2, 0.5, 2.0]
    self.saconsts = saconstsm + self.saconstsb
    self.aoptions['SAConsts'] = self.saconsts
    #self.gridSol = f'{meshname}_{saconstsm}_sol'
    solname = self.ooptions['prob_name']
    self.gridSol = f'{solname}_sol'

    # Get a set of UQ sample points (LHS)
    #if self.ooptions['run_once']:
    #    self.sample = self.uoptions['dist']
    #else

    # Scatter samples, multi-point parallelism
    if self.uoptions['MCTimeBudget']:
        self.aps = []
        self.solvers = []
        self.meshes = []
        self.current_samples = self.NS0
        if rank == 0:
            rank0sam = plate_sa_lhs.genLHS(s=self.current_samples)
        else:
            rank0sam = None
        self.sample = comm.bcast(rank0sam, root=0)
        self.cases = divide_cases(self.NS0, size)
        # Scatter samples on each level, multi-point parallelism
        self.samplep = self.sample[self.cases[rank]]
        self.nsp = len(self.cases[rank])

        # Create solvers for the preliminary data
        for i in range(self.nsp):
            namestr = self.gridSol + "_" + str(self.cases[rank][i])

            # create meshes
            self.meshes.append(
                USMesh(options=self.woptions, comm=MPI.COMM_SELF))

            # create aeroproblems
            self.aps.append(
                AeroProblem(name=namestr,
                            alpha=alpha,
                            mach=mach,
                            reynolds=Re,
                            reynoldsLength=Re_L,
                            T=temp,
                            areaRef=arearef,
                            chordRef=chordref,
                            evalFuncs=['cd']))
            time.sleep(0.1)  # this solves a few problems for some reason

            # create solvers
            self.solvers.append(
                ADFLOW(options=self.aoptions, comm=MPI.COMM_SELF))

            saconstsm = self.samplep[i].tolist()
            self.saconsts = saconstsm + self.saconstsb
            self.solvers[i].setOption('SAConsts', self.saconsts)
            self.solvers[i].setDVGeo(self.DVGeo)
            self.solvers[i].setMesh(self.meshes[i])
            print("what up %i" % rank)
            coords = self.solvers[i].getSurfaceCoordinates(
                groupName=self.solvers[i].allWallsGroup)
            self.solvers[i].DVGeo.addPointSet(coords, 'coords')

        # start looping over mesh levels
        sumt = 0.
        sumtp = 0.
        Et = 0.
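
        # The pilot loop below times each of the NS0 startup solves with
        # time.process_time(); the mean per-sample cost Et computed after the
        # loop converts the budget P into the final sample count,
        # NS = ceil(P / Et).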
        funcs = {}
        a_init = self.DVGeo.getValues()
        a_init['pnts'][:] = self.ooptions['DVInit']
        dvdict = {'pnts': a_init['pnts']}
        for i in range(self.nsp):
            saconstsm = self.samplep[i].tolist()
            self.saconsts = saconstsm + self.saconstsb
            self.solvers[i].setOption('SAConsts', self.saconsts)
            self.solvers[i].DVGeo.setDesignVars(dvdict)
            self.aps[i].setDesignVars(dvdict)
            pc0 = time.process_time()
            self.solvers[i](self.aps[i])
            self.solvers[i].evalFunctions(self.aps[i], funcs)
            pc1 = time.process_time()
            astr = self.gridSol + "_" + str(self.cases[rank][i]) + "_cd"
            sumtp += (pc1 - pc0)

        sumt = comm.allreduce(sumtp)
        Et = sumt / self.NS0
        self.NS = math.ceil(self.P / Et)
        self.Pr = self.NS * Et
    else:
        self.NS = self.uoptions['NS']

    #import pdb; pdb.set_trace()
    if rank == 0:
        rank0sam = plate_sa_lhs.genLHS(s=self.NS)
    else:
        rank0sam = None
    self.sample = comm.bcast(rank0sam, root=0)

    self.cases = divide_cases(self.NS, size)
    self.nsp = len(self.cases[rank])  #int(ns/size) # samples per processor
    #import pdb; pdb.set_trace()
    self.samplep = self.sample[self.cases[rank]]  #self.sample[(rank*self.nsp):(rank*self.nsp+(self.nsp))] #shouldn't really need to "scatter" per se
    #import pdb; pdb.set_trace()
    #assert len(self.samplep) == self.nsp

    # Actually create solvers (and aeroproblems?) (and mesh?) now
    self.aps = []
    self.solvers = []
    self.meshes = []
    #self.mesh = USMesh(options=self.woptions, comm=MPI.COMM_SELF)
    for i in range(self.nsp):
        namestr = self.gridSol + "_" + str(self.cases[rank][i])

        # create meshes
        self.meshes.append(
            USMesh(options=self.woptions, comm=MPI.COMM_SELF))

        # create aeroproblems
        self.aps.append(
            AeroProblem(name=namestr,
                        alpha=alpha,
                        mach=mach,
                        reynolds=Re,
                        reynoldsLength=Re_L,
                        T=temp,
                        areaRef=arearef,
                        chordRef=chordref,
                        evalFuncs=['cd']))
        time.sleep(0.1)  # this solves a few problems for some reason

        # create solvers
        self.solvers.append(
            ADFLOW(options=self.aoptions, comm=MPI.COMM_SELF))

        # if not self.ooptions['run_once']:
        #     saconstsm = self.samplep[i].tolist()
        # else:
        saconstsm = self.samplep[i].tolist()
        self.saconsts = saconstsm + self.saconstsb
        self.solvers[i].setOption('SAConsts', self.saconsts)
        self.solvers[i].setDVGeo(self.DVGeo)
        self.solvers[i].setMesh(self.meshes[i])
        print("what up %i" % rank)
        coords = self.solvers[i].getSurfaceCoordinates(
            groupName=self.solvers[i].allWallsGroup)
        self.solvers[i].DVGeo.addPointSet(coords, 'coords')

    # Set constraints, should only need one of those solvers, the meshes are all the same
    self.DVCon = DVConstraints()
    self.DVCon2 = DVConstraints()

    self.DVCon.setDVGeo(self.solvers[0].DVGeo.getFlattenedChildren()[1])
    self.DVCon2.setDVGeo(self.solvers[0].DVGeo)

    self.DVCon.setSurface(self.solvers[0].getTriangulatedMeshSurface(
        groupName='allSurfaces'))
    # set extra group for surface area condition
    self.DVCon2.setSurface(self.solvers[0].getTriangulatedMeshSurface(),
                           name='wall')

    # DV should be same into page (not doing anything right now)
    #import pdb; pdb.set_trace()
    lIndex = self.solvers[0].DVGeo.getFlattenedChildren()[1].getLocalIndex(0)
    indSetA = []
    indSetB = []
    nXc = optOptions['NX']
    self.NC = math.trunc(
        ((1.0 - self.ooptions['DVFraction']) * self.ooptions['NX']))
    ind = [
        int(nXc / 2) - int(self.NC / 2),
        int(nXc / 2) + int(self.NC / 2)
    ]
    for i in range(ind[0], ind[1]):
        indSetA.append(lIndex[i, 0, 1])
        indSetB.append(lIndex[i, 1, 1])
    # for i in range(lIndex.shape[0]):
    #     indSetA.append(lIndex[i, 0, 1])
    #     indSetB.append(lIndex[i, 1, 1])
    self.DVCon.addLinearConstraintsShape(indSetA,
                                         indSetB,
                                         factorA=1.0,
                                         factorB=-1.0,
                                         lower=0,
                                         upper=0,
                                         name='eqs')

    # Thickness constraints (one for each active DV)
    #import pdb; pdb.set_trace()

    # Maximum thickness of the domain, translates to minimum thickness of bump
    ub = 1.0 - self.ooptions['DCMinThick']
    tcf = self.ooptions['DCThickFrac']
    ra = self.ooptions['bumpBounds']
    lim = self.ooptions['DCMinArea']
    span = numpy.linspace(0, 1, nXc)
    xc = span * (ra[1] - ra[0]) + ra[0]
    #ind = range(int(nXc/2) - int(self.NC/2), int(nXc/2) + int(self.NC/2)))
    ind = [
        int(nXc / 2) - int(tcf * self.NC / 2),
        int(nXc / 2) + int(tcf * self.NC / 2)
    ]
    ptList = numpy.zeros([2, 3])
    ptList[:, 0] = xc[ind]
    ptList[:, 1] = 0.5
    ptList[:, 2] = 0.5

    if self.ooptions['use_area_con']:
        self.DVCon2.addSurfaceAreaConstraint(lower=lim,
                                             upper=10.,
                                             name='sas',
                                             surfaceName='wall')
    else:
        self.DVCon2.addThicknessConstraints1D(ptList,
                                              self.NC, [0, 0, 1],
                                              lower=0.5,
                                              upper=ub,
                                              name='tcs')

    print("excuse me")
    dummy = rank
    dsum = comm.allgather(dummy)
    sys.stdout = sys.__stdout__
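
# The MCTimeBudget path in initialize() sizes the Monte Carlo run from a cost
# budget: average the NS0 pilot solve times to get Et, then take the largest
# sample count that fits the budget P. A minimal standalone sketch of that
# arithmetic (the helper name and the pilot_costs input are illustrative, not
# part of this class):
def budgeted_sample_count(pilot_costs, P):
    """Return (NS, predicted cost) from pilot per-sample costs and budget P."""
    import math
    Et = sum(pilot_costs) / len(pilot_costs)  # mean cost per sample
    NS = math.ceil(P / Et)                    # samples that fit the budget
    return NS, NS * Et
# Example: budgeted_sample_count([12.0, 11.5, 13.2], P=600.) -> (50, about 611.7)
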
def MFMC(self):
    # Use an MFMC algorithm to determine optimal sample distribution and coefficients among mesh levels
    # We do this once before optimization, then compute statistics with the same set of samples and coeffs at every iteration

    # start with initial samples
    # Get a set of UQ sample points (LHS), enough for each level at the start
    #sys.stdout = open(os.devnull, "w")

    # flow characteristics
    alpha = 0.0
    mach = self.ooptions['mach']  #0.95
    Re = self.ooptions['Re']  #50000
    Re_L = 1.0
    tempR = 540
    arearef = 2.0
    chordref = 1.0
    a_init = self.DVGeo.getValues()
    a_init['pnts'][:] = self.ooptions['DVInit']

    self.current_samples = self.NS0 * self.Lmax
    if rank == 0:
        rank0sam = plate_sa_lhs.genLHS(s=self.current_samples)
    else:
        rank0sam = None
    self.sample = comm.bcast(rank0sam, root=0)

    N1 = []
    a1 = numpy.zeros(self.Lmax)
    r1 = numpy.zeros(self.Lmax)

    # Scatter samples on each level, multi-point parallelism
    for i in range(self.Lmax):
        self.cases.append(divide_cases(self.NS0, size))
        for j in range(len(self.cases[i])):
            for k in range(len(self.cases[i][j])):
                self.cases[i][j][k] += i * self.NS0
        #self.nsp.append(len(self.cases[i][rank]))#int(ns/size) # samples per processor
        self.samplep.append(self.sample[self.cases[i][rank]])
    #import pdb; pdb.set_trace()
    #self.samplep = self.sample[self.cases[rank]]#self.sample[(rank*self.nsp):(rank*self.nsp+(self.nsp))] #shouldn't really need to "scatter" per se
    #import pdb; pdb.set_trace()
    # for i in range(self.Lmax):
    #     assert len(self.samplep[i]) == self.nsp[i]

    # Create solvers for the preliminary data
    nslp = []
    nslt = []
    for k in range(self.Lmax):
        alist = []
        slist = []
        mlist = []
        nslp.append(len(self.cases[k][rank]))
        nslt.append(sum([len(self.cases[k][x]) for x in range(size)]))
        for i in range(nslp[k]):
            namestr = self.gridSol + "_" + str(self.cases[k][rank][i])

            # create meshes
            leveloptions = self.woptions
            leveloptions['gridFile'] = self.meshnames[self.mord[k]]
            #import pdb; pdb.set_trace()
            mlist.append(USMesh(options=leveloptions, comm=MPI.COMM_SELF))

            # create aeroproblems
            aloptions = self.aoptions
            aloptions['gridFile'] = self.meshnames[self.mord[k]]
            alist.append(
                AeroProblem(name=namestr,
                            alpha=alpha,
                            mach=mach,
                            reynolds=Re,
                            reynoldsLength=Re_L,
                            T=tempR,
                            areaRef=arearef,
                            chordRef=chordref,
                            evalFuncs=['cd']))
            time.sleep(0.1)  # this solves a few problems for some reason

            # create solvers
            slist.append(ADFLOW(options=aloptions, comm=MPI.COMM_SELF))

            # if not self.ooptions['run_once']:
            #     saconstsm = self.samplep[i].tolist()
            # else:
            saconstsm = self.samplep[0][i].tolist()
            self.saconsts = saconstsm + self.saconstsb
            slist[i].setOption('SAConsts', self.saconsts)
            slist[i].setDVGeo(self.DVGeo)
            slist[i].setMesh(mlist[i])
            coords = slist[i].getSurfaceCoordinates(
                groupName=slist[i].allWallsGroup)
            slist[i].DVGeo.addPointSet(coords, 'coords')

        self.aps.append(alist)
        self.solvers.append(slist)
        self.meshes.append(mlist)

    # Solve the preliminary samples
    # start looping over mesh levels
    sumt = []
    sumtp = []
    nslp = []
    nslt = []
    sum1 = []
    mus = []
    sump = []
    musp = []
    sumpm = []
    muspm = []
    summ = []
    musm = []
    Et = numpy.zeros(self.Lmax)
    E = numpy.zeros(self.Lmax)
    V = numpy.zeros(self.Lmax)
    S = numpy.zeros(self.Lmax)
    N1 = []
    for k in range(self.Lmax):
        nslp.append(len(self.cases[k][rank]))
        nslt.append(sum([len(self.cases[k][x]) for x in range(size)]))
        dvdict = {'pnts': a_init['pnts']}
        funcs = {}
        sumtp.append(0.0)
        sump.append(0.)
        musp.append(numpy.zeros(nslp[k]))
        sumpm.append(0.)
        muspm.append(numpy.zeros(nslp[k]))
        for i in range(nslp[k]):
            # just do this again in case
            saconstsm = self.samplep[0][i].tolist()
            self.saconsts = saconstsm + self.saconstsb
            self.solvers[k][i].setOption('SAConsts', self.saconsts)
            self.solvers[k][i].DVGeo.setDesignVars(dvdict)
            self.aps[k][i].setDesignVars(dvdict)
            pc0 = time.process_time()
            self.solvers[k][i](self.aps[k][i])
            self.solvers[k][i].evalFunctions(self.aps[k][i], funcs)
            pc1 = time.process_time()
            astr = self.gridSol + "_" + str(self.cases[k][rank][i]) + "_cd"
            musp[k][i] = funcs[astr]
            sump[k] += funcs[astr]
            sumtp[k] += (pc1 - pc0)

    # compute mean and variance estimate from start up samples
    for k in range(self.Lmax):
        sumt.append(comm.allreduce(sumtp[k]))
        sum1.append(comm.allreduce(sump[k]))
        mus.append(comm.allgather(musp[k]))
        summ.append(comm.allreduce(sumpm[k]))
        musm.append(comm.allgather(muspm[k]))
        mus[k] = numpy.concatenate(mus[k][:])
        musm[k] = numpy.concatenate(musm[k][:])
        #import pdb; pdb.set_trace()

        # mean at each level
        Et[k] = sumt[k] / nslt[k]
        E[k] = (sum1[k]) / nslt[k]  #+summ[k]
        sum2 = 0.
        for i in range(len(mus[k])):  #loop over processors
            sum2 += (mus[k][i] - E[k])**2
        V[k] = sum2 / nslt[k]
        S[k] = math.sqrt(V[k])

    # compute correlation matrix and rearrange models if necessary
    ordered = False
    while not ordered:
        rho = numpy.corrcoef(mus)
        ordered = True
        # check if contradicted
        #tarr = rho[0,1:]
        for k in range(self.Lmax - 2):
            test = rho[0, 1 + k]**2 - rho[0, 2 + k]**2
            if test < 0:
                ordered = False
        tarr = -rho[0, :]**2
        if not ordered:
            sind = numpy.argsort(tarr)
            #import pdb; pdb.set_trace()
            self.mord[:] = [self.mord[i] for i in sind]
            E[:] = [E[i] for i in sind]
            Et[:] = [Et[i] for i in sind]
            V[:] = [V[i] for i in sind]
            S[:] = [S[i] for i in sind]
            mus[:] = [mus[i] for i in sind]

    # now compute N1 and a1 using sigma, rho, w, and p
    for k in range(self.Lmax):
        a1[k] = S[0] * rho[0, k] / S[k]
        if k == 0:
            r1[k] = 1
        elif k == self.Lmax - 1:
            work = Et[0] * (rho[0, k]**2)
            work /= Et[k] * (1 - rho[0, 1]**2)
            r1[k] = math.sqrt(work)
        else:
            work = Et[0] * (rho[0, k - 1]**2 - rho[0, k]**2)
            work /= Et[k] * (1 - rho[0, 1]**2)
            r1[k] = math.sqrt(work)

    for k in range(self.Lmax):
        N1.append(0)
    nsf = self.P / numpy.dot(Et, r1)
    N1[0] = math.ceil(nsf)
    for k in range(self.Lmax):
        nsf = N1[0] * r1[k]
        N1[k] = math.ceil(nsf)

    # limit the number of samples on the last one to pass the sanity check, for debug
    sanity = numpy.dot(N1, Et)
    if sanity > 1.2 * self.P:
        N1n = (self.P - numpy.dot(N1[0:self.Lmax - 2],
                                  Et[0:self.Lmax - 2])) / Et[self.Lmax - 1]
        N1[self.Lmax - 1] = math.ceil(N1n)

    self.Pr = numpy.dot(N1, Et)
    self.N1 = N1
    self.a1 = a1
    #import pdb; pdb.set_trace()
    if rank == 0:
        print("MFMC Completed, Samples per level: ", N1)
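
# For reference, the allocation step at the end of MFMC() mirrors the usual
# MFMC recipe: coefficients a_k = S_0 * rho_0k / S_k and sample ratios r_k set
# by per-level cost and squared correlation, with N_0 = ceil(P / dot(Et, r))
# and N_k = ceil(N_0 * r_k). A condensed sketch of the same arithmetic as the
# loops above (function name and signature are illustrative only; assumes at
# least two levels):
def mfmc_allocation(S, rho0, Et, P):
    """Per-level coefficients and sample counts from std devs S, correlations
    rho0 against the highest-fidelity model, costs Et, and budget P."""
    import math
    import numpy
    L = len(S)
    a = numpy.array([S[0] * rho0[k] / S[k] for k in range(L)])
    r = numpy.ones(L)
    for k in range(1, L):
        # last level keeps rho0[k]**2; interior levels use the difference,
        # exactly as in the r1 loop above
        num = rho0[k]**2 if k == L - 1 else rho0[k - 1]**2 - rho0[k]**2
        r[k] = math.sqrt(Et[0] * num / (Et[k] * (1.0 - rho0[1]**2)))
    N = [0] * L
    N[0] = math.ceil(P / numpy.dot(Et, r))
    for k in range(1, L):
        N[k] = math.ceil(N[0] * r[k])
    return a, N
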
def dist_samples(self):
    # If we already have the number of samples, just create as many solvers as needed at each level
    # Just do this after running MLMC() anyway

    # flow characteristics
    alpha = 0.0
    mach = self.ooptions['mach']  #0.95
    Re = self.ooptions['Re']  #50000
    Re_L = 1.0
    tempR = 540
    arearef = 2.0
    chordref = 1.0
    a_init = self.DVGeo.getValues()
    a_init['pnts'][:] = self.ooptions['DVInit']

    self.current_samples = sum(self.N1)
    if rank == 0:
        rank0sam = plate_sa_lhs.genLHS(s=self.current_samples)
    else:
        rank0sam = None
    self.sample = comm.bcast(rank0sam, root=0)
    #import pdb; pdb.set_trace()

    # Scatter samples on each level, multi-point parallelism
    self.cases = []
    self.samplep = []
    for i in range(self.Lmax):
        self.cases.append(divide_cases(self.N1[i], size))
        for j in range(len(self.cases[i])):
            for k in range(len(self.cases[i][j])):
                self.cases[i][j][k] += sum(self.N1[0:i])
        #self.nsp.append(len(self.cases[i][rank]))#int(ns/size) # samples per processor
        self.samplep.append(self.sample[self.cases[i][rank]])

    # Actually create solvers (and aeroproblems?) (and mesh?) now
    self.aps = []
    self.solvers = []
    self.meshes = []
    nslp = []
    nslt = []
    for k in range(self.Lmax):
        alist = []
        slist = []
        mlist = []
        nslp.append(len(self.cases[k][rank]))
        nslt.append(sum([len(self.cases[k][x]) for x in range(size)]))
        for i in range(nslp[k]):
            namestr = self.gridSol + "_" + str(self.cases[k][rank][i])

            # create meshes
            leveloptions = self.woptions
            leveloptions['gridFile'] = self.meshnames[self.mord[k]]
            mlist.append(USMesh(options=leveloptions, comm=MPI.COMM_SELF))

            # create aeroproblems
            aloptions = self.aoptions
            aloptions['gridFile'] = self.meshnames[self.mord[k]]
            alist.append(
                AeroProblem(name=namestr,
                            alpha=alpha,
                            mach=mach,
                            reynolds=Re,
                            reynoldsLength=Re_L,
                            T=tempR,
                            areaRef=arearef,
                            chordRef=chordref,
                            evalFuncs=['cd']))
            time.sleep(0.1)  # this solves a few problems for some reason

            # create solvers
            slist.append(ADFLOW(options=aloptions, comm=MPI.COMM_SELF))

            saconstsm = self.samplep[self.Lmax - 1][i].tolist()
            self.saconsts = saconstsm + self.saconstsb
            slist[i].setOption('SAConsts', self.saconsts)
            slist[i].setDVGeo(self.DVGeo)
            slist[i].setMesh(mlist[i])
            coords = slist[i].getSurfaceCoordinates(
                groupName=slist[i].allWallsGroup)
            slist[i].DVGeo.addPointSet(coords, 'coords')

        self.aps.append(alist)
        self.solvers.append(slist)
        self.meshes.append(mlist)
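
# dist_samples() keys every solver to a global LHS row: level i owns the
# contiguous block of indices starting at sum(N1[0:i]), and divide_cases()
# splits each block across the MPI ranks. A toy sketch of that bookkeeping
# with a stand-in round-robin splitter (both names here are illustrative, not
# the helpers used above):
def _global_case_table(N1, nprocs):
    """Return cases[level][rank] -> list of global sample indices."""
    def split(n, p):  # stand-in for divide_cases()
        return [list(range(r, n, p)) for r in range(p)]
    table = []
    offset = 0
    for n in N1:
        table.append([[c + offset for c in chunk] for chunk in split(n, nprocs)])
        offset += n
    return table
# Example: _global_case_table([4, 2], nprocs=2) -> [[[0, 2], [1, 3]], [[4], [5]]]
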
def MLMC(self):
    # Use an MLMC algorithm to determine an optimal sample distribution between existing mesh levels
    # We do this once before optimization, then compute statistics with the same set of samples at every iteration

    # start with initial samples
    # Get a set of UQ sample points (LHS), enough for each level at the start
    #sys.stdout = open(os.devnull, "w")

    # flow characteristics
    alpha = 0.0
    mach = self.ooptions['mach']  #0.95
    Re = self.ooptions['Re']  #50000
    Re_L = 1.0
    tempR = 540
    arearef = 2.0
    chordref = 1.0
    a_init = self.DVGeo.getValues()
    a_init['pnts'][:] = self.ooptions['DVInit']

    self.current_samples = self.NS0 * self.Lmax
    if rank == 0:
        rank0sam = plate_sa_lhs.genLHS(s=self.current_samples)
    else:
        rank0sam = None
    self.sample = comm.bcast(rank0sam, root=0)

    # Scatter samples on each level, multi-point parallelism
    for i in range(self.Lmax):
        self.cases.append(divide_cases(self.NS0, size))
        for j in range(len(self.cases[i])):
            for k in range(len(self.cases[i][j])):
                self.cases[i][j][k] += i * self.NS0
        #self.nsp.append(len(self.cases[i][rank]))#int(ns/size) # samples per processor
        self.samplep.append(self.sample[self.cases[i][rank]])
    #import pdb; pdb.set_trace()
    #self.samplep = self.sample[self.cases[rank]]#self.sample[(rank*self.nsp):(rank*self.nsp+(self.nsp))] #shouldn't really need to "scatter" per se
    #import pdb; pdb.set_trace()
    # for i in range(self.Lmax):
    #     assert len(self.samplep[i]) == self.nsp[i]

    # Actually create solvers (and aeroproblems?) (and mesh?) now
    nslp = []
    nslt = []
    for k in range(self.Lmax):
        alist = []
        slist = []
        mlist = []
        alist2 = []
        slist2 = []
        mlist2 = []
        nslp.append(len(self.cases[k][rank]))
        nslt.append(sum([len(self.cases[k][x]) for x in range(size)]))
        for i in range(nslp[k]):
            namestr = self.gridSol + "_" + str(self.cases[k][rank][i])

            # create meshes
            leveloptions = self.woptions
            leveloptions['gridFile'] = self.meshnames[k]
            mlist.append(USMesh(options=leveloptions, comm=MPI.COMM_SELF))

            # create aeroproblems
            aloptions = self.aoptions
            aloptions['gridFile'] = self.meshnames[k]
            alist.append(
                AeroProblem(name=namestr,
                            alpha=alpha,
                            mach=mach,
                            reynolds=Re,
                            reynoldsLength=Re_L,
                            T=tempR,
                            areaRef=arearef,
                            chordRef=chordref,
                            evalFuncs=['cd']))
            time.sleep(0.1)  # this solves a few problems for some reason

            # create solvers
            slist.append(ADFLOW(options=aloptions, comm=MPI.COMM_SELF))

            # if not self.ooptions['run_once']:
            #     saconstsm = self.samplep[i].tolist()
            # else:
            saconstsm = self.samplep[k][i].tolist()
            self.saconsts = saconstsm + self.saconstsb
            slist[i].setOption('SAConsts', self.saconsts)
            slist[i].setDVGeo(self.DVGeo)
            slist[i].setMesh(mlist[i])
            coords = slist[i].getSurfaceCoordinates(
                groupName=slist[i].allWallsGroup)
            slist[i].DVGeo.addPointSet(coords, 'coords')

            if k > 0:  #create additional solvers at higher levels for the estimators
                # create meshes
                namestr = self.gridSol + "_" + str(
                    self.cases[k][rank][i]) + "_m"
                leveloptions = self.woptions
                leveloptions['gridFile'] = self.meshnames[k - 1]
                mlist2.append(
                    USMesh(options=leveloptions, comm=MPI.COMM_SELF))

                # create aeroproblems
                aloptions = self.aoptions
                aloptions['gridFile'] = self.meshnames[k - 1]
                alist2.append(
                    AeroProblem(name=namestr,
                                alpha=alpha,
                                mach=mach,
                                reynolds=Re,
                                reynoldsLength=Re_L,
                                T=tempR,
                                areaRef=arearef,
                                chordRef=chordref,
                                evalFuncs=['cd']))
                time.sleep(0.1)  # this solves a few problems for some reason

                # create solvers
                slist2.append(ADFLOW(options=aloptions, comm=MPI.COMM_SELF))
                slist2[i].setOption('SAConsts', self.saconsts)
                slist2[i].setDVGeo(self.DVGeo)
                slist2[i].setMesh(mlist2[i])
                coords = slist[i].getSurfaceCoordinates(
                    groupName=slist2[i].allWallsGroup)
                slist2[i].DVGeo.addPointSet(coords, 'coords')

        self.aps.append(alist)
        self.solvers.append(slist)
        self.meshes.append(mlist)
        if k > 0:
            self.aps[k] += alist2
            self.solvers[k] += slist2
            self.meshes[k] += mlist2

    #import pdb; pdb.set_trace()
    # start looping over mesh levels
    L = 0
    M = 4.0  #0.5 #refinement factor?
    converged = 0
    eps = self.uoptions['vartol']
    sum1 = []
    mus = []
    sump = []
    musp = []
    sumpm = []
    muspm = []
    summ = []
    musm = []
    E = []
    V = []
    N1 = []
    while not converged and L < self.Lmax:
        # compute start up samples to estimate variance
        dvdict = {'pnts': a_init['pnts']}
        funcs = {}
        nslp = []
        nslt = []
        for k in range(self.Lmax):
            nslp.append(len(self.cases[k][rank]))
            nslt.append(sum([len(self.cases[k][x]) for x in range(size)]))
        sump.append(0.)
        musp.append(numpy.zeros(nslp[L]))
        sumpm.append(0.)
        muspm.append(numpy.zeros(nslp[L]))
        for i in range(nslp[L]):
            # just do this again in case
            saconstsm = self.samplep[L][i].tolist()
            self.saconsts = saconstsm + self.saconstsb
            self.solvers[L][i].setOption('SAConsts', self.saconsts)
            self.solvers[L][i].DVGeo.setDesignVars(dvdict)
            self.aps[L][i].setDesignVars(dvdict)
            self.solvers[L][i](self.aps[L][i])
            self.solvers[L][i].evalFunctions(self.aps[L][i], funcs)
            astr = self.gridSol + "_" + str(self.cases[L][rank][i]) + "_cd"
            musp[L][i] = funcs[astr]
            sump[L] += funcs[astr]
            #import pdb; pdb.set_trace()
            if L > 0:
                self.solvers[L][i + nslp[L]].setOption(
                    'SAConsts', self.saconsts)
                self.solvers[L][i + nslp[L]].DVGeo.setDesignVars(dvdict)
                self.aps[L][i + nslp[L]].setDesignVars(dvdict)
                self.solvers[L][i + nslp[L]](self.aps[L][i + nslp[L]])
                self.solvers[L][i + nslp[L]].evalFunctions(
                    self.aps[L][i + nslp[L]], funcs)
                astr = self.gridSol + "_" + str(
                    self.cases[L][rank][i]) + "_m_cd"
                muspm[L][i] = -funcs[astr]
                sumpm[L] += -funcs[astr]

        # compute mean and variance estimate from start up samples
        sum1.append(comm.allreduce(sump[L]))
        mus.append(comm.allgather(musp[L]))
        summ.append(comm.allreduce(sumpm[L]))
        musm.append(comm.allgather(muspm[L]))
        #import pdb; pdb.set_trace()

        # mean at each level
        E = numpy.zeros(L + 1)
        for l in range(L + 1):
            E[l] = (sum1[l] + summ[l]) / nslt[l]

        # variance at each level
        V = numpy.zeros(L + 1)
        for l in range(L + 1):
            sum2 = 0.
            for i in range(len(mus[l])):  #range(size):
                for j in range(len(mus[l][i])):  #range(self.nsp):
                    if l > 0:
                        sum2 += ((mus[l][i][j] + musm[l][i][j]) - E[l])**2
                    else:
                        sum2 += (mus[l][i][j] - E[l])**2
            V[l] = sum2 / nslt[l]
        #import pdb; pdb.set_trace()

        # now determine the optimal number of samples at each level
        N1.append(0.)
        worksum = 0
        for l in range(L + 1):
            worksum += numpy.sqrt(V[l] * (M**l))
        for l in range(L + 1):
            nlf = 2 * numpy.sqrt(V[l] / (M**l)) * worksum / (eps * eps)
            nlfm = max(nslt[l], math.ceil(nlf))
            N1[l] = nlfm

        # now compute and generate additional samples at each level
        # first partition samples NEVERMIND (just do it once at each level, no need to repeat)
        # create the extra number of solvers at each (the current) level
        # need to loop everything from here on
        for l in range(L + 1):
            alist = self.aps[l][0:nslp[l]]
            slist = self.solvers[l][0:nslp[l]]
            mlist = self.meshes[l][0:nslp[l]]
            if l > 0:
                alist2 = self.aps[l][nslp[l]:]
                slist2 = self.solvers[l][nslp[l]:]
                mlist2 = self.meshes[l][nslp[l]:]

            self.naddedtot[l] = N1[l] - nslt[l]
            self.current_samples += self.naddedtot[l]
            #import pdb; pdb.set_trace()
            if rank == 0:
                rank0sam = plate_sa_lhs.genLHS(s=self.current_samples)
            else:
                rank0sam = None
            self.sample = comm.bcast(rank0sam, root=0)

            if self.naddedtot[l] > 0:
                temp = divide_cases(self.naddedtot[l], size)
                for i in range(len(temp)):
                    for j in range(len(temp[i])):
                        temp[i][j] += self.current_samples - self.naddedtot[
                            l]  #self.Lmax*self.NS0 + sum(self.naddedtot[0:L])
            else:
                temp = []
            if len(temp):
                for ns in range(size):
                    self.cases[l][ns] += temp[ns]  #append
            nslpnew = len(self.cases[l][rank])
            nsltnew = sum([len(self.cases[l][x]) for x in range(size)])
            #self.nsp[L] = len(self.cases[L][rank]) #int(ns/size) # samples per processor
            self.samplep[l] = self.sample[self.cases[l][rank]]

            for i in range(nslp[l], nslpnew):  #need it to be just the extra cases
                #import pdb; pdb.set_trace()
                namestr = self.gridSol + "_" + str(self.cases[l][rank][i])

                # create meshes
                leveloptions = self.woptions
                leveloptions['gridFile'] = self.meshnames[l]
                mlist.append(
                    USMesh(options=leveloptions, comm=MPI.COMM_SELF))

                # create aeroproblems
                aloptions = self.aoptions
                aloptions['gridFile'] = self.meshnames[l]
                alist.append(
                    AeroProblem(name=namestr,
                                alpha=alpha,
                                mach=mach,
                                reynolds=Re,
                                reynoldsLength=Re_L,
                                T=tempR,
                                areaRef=arearef,
                                chordRef=chordref,
                                evalFuncs=['cd']))
                time.sleep(0.1)  # this solves a few problems for some reason

                # create solvers
                slist.append(ADFLOW(options=aloptions, comm=MPI.COMM_SELF))

                saconstsm = self.samplep[l][i].tolist()
                self.saconsts = saconstsm + self.saconstsb
                slist[i].setOption('SAConsts', self.saconsts)
                slist[i].setDVGeo(self.DVGeo)
                slist[i].setMesh(mlist[i])
                coords = slist[i].getSurfaceCoordinates(
                    groupName=slist[i].allWallsGroup)
                slist[i].DVGeo.addPointSet(coords, 'coords')
                time.sleep(0.1)

                if l > 0:  #create additional solvers at higher levels for the estimators
                    # create meshes
                    #import pdb; pdb.set_trace()
                    namestr = self.gridSol + "_" + str(
                        self.cases[l][rank][i]) + "_m"
                    leveloptions = self.woptions
                    leveloptions['gridFile'] = self.meshnames[l - 1]
                    mlist2.append(
                        USMesh(options=leveloptions, comm=MPI.COMM_SELF))

                    # create aeroproblems
                    aloptions = self.aoptions
                    aloptions['gridFile'] = self.meshnames[l - 1]
                    alist2.append(
                        AeroProblem(name=namestr,
                                    alpha=alpha,
                                    mach=mach,
                                    reynolds=Re,
                                    reynoldsLength=Re_L,
                                    T=tempR,
                                    areaRef=arearef,
                                    chordRef=chordref,
                                    evalFuncs=['cd']))
                    time.sleep(0.1)  # this solves a few problems for some reason

                    # create solvers
                    slist2.append(
                        ADFLOW(options=aloptions, comm=MPI.COMM_SELF))
                    slist2[i].setOption('SAConsts', self.saconsts)
                    slist2[i].setDVGeo(self.DVGeo)
                    slist2[i].setMesh(mlist2[i])
                    coords = slist[i].getSurfaceCoordinates(
                        groupName=slist2[i].allWallsGroup)
                    slist2[i].DVGeo.addPointSet(coords, 'coords')

            nslp[l] = nslpnew
            nslt[l] = nsltnew
            self.aps[l] = alist
            self.solvers[l] = slist
            self.meshes[l] = mlist
            if l > 0:
                self.aps[l] += alist2
                self.solvers[l] += slist2
                self.meshes[l] += mlist2

            # compute remaining samples
            sump[l] = 0
            sumpm[l] = 0
            musp[l] = numpy.zeros(nslp[l])
            muspm[l] = numpy.zeros(nslp[l])
            for i in range(nslp[l]):
                # just do this again in case
                saconstsm = self.samplep[l][i].tolist()
                self.saconsts = saconstsm + self.saconstsb
                self.solvers[l][i].setOption('SAConsts', self.saconsts)
                self.solvers[l][i].DVGeo.setDesignVars(dvdict)
                self.aps[l][i].setDesignVars(dvdict)
                self.solvers[l][i](self.aps[l][i])
                self.solvers[l][i].evalFunctions(self.aps[l][i], funcs)
                astr = self.gridSol + "_" + str(self.cases[l][rank][i]) + "_cd"
                musp[l][i] = funcs[astr]
                sump[l] += funcs[astr]
                #import pdb; pdb.set_trace()
                if l > 0:
                    self.solvers[l][i + nslp[l]].setOption(
                        'SAConsts', self.saconsts)
                    self.solvers[l][i + nslp[l]].DVGeo.setDesignVars(dvdict)
                    self.aps[l][i + nslp[l]].setDesignVars(dvdict)
                    self.solvers[l][i + nslp[l]](self.aps[l][i + nslp[l]])
                    self.solvers[l][i + nslp[l]].evalFunctions(
                        self.aps[l][i + nslp[l]], funcs)
                    astr = self.gridSol + "_" + str(
                        self.cases[l][rank][i]) + "_m_cd"
                    muspm[l][i] = -funcs[astr]
                    sumpm[l] += -funcs[astr]

            # compute mean and variance estimate from all samples
            sum1[l] = comm.allreduce(sump[l])
            mus[l] = comm.allgather(musp[l])
            summ[l] = comm.allreduce(sumpm[l])
            musm[l] = comm.allgather(muspm[l])

            # mean at each level
            E[l] = (sum1[l] + summ[l]) / nslt[l]

            # variance at each level
            sum2 = 0.
            for i in range(len(mus[l])):  #range(size):
                for j in range(len(mus[l][i])):  #range(self.nsp):
                    if l > 0:
                        sum2 += ((mus[l][i][j] + musm[l][i][j]) - E[l])**2
                    else:
                        sum2 += (mus[l][i][j] - E[l])**2
            V[l] = sum2 / nslt[l]

        # if L == 1:
        #     import pdb; pdb.set_trace()
        L += 1
        #import pdb; pdb.set_trace()

    #sys.stdout = sys.__stdout__
    if rank == 0:
        print("MLMC Completed, Samples per level: ", N1)
    self.N1 = N1
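
# The sample-count update inside the MLMC() loop is the standard MLMC
# allocation with the per-level cost modeled as M**l: each N_l is proportional
# to sqrt(V_l / M**l), scaled so the estimator variance meets the tolerance
# eps, and never drops below the samples already taken. A condensed sketch of
# just that update (name and signature illustrative only):
def mlmc_sample_counts(V, M, eps, n_taken):
    """Optimal per-level counts from variances V, cost ratio M, tolerance eps."""
    import math
    worksum = sum(math.sqrt(V[l] * M**l) for l in range(len(V)))
    return [
        max(n_taken[l], math.ceil(2.0 * math.sqrt(V[l] / M**l) * worksum / (eps * eps)))
        for l in range(len(V))
    ]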