def setObjCon(self, func):
    """
    Set the python function handle to compute the final objective and
    constraints that are combinations of the functionals.

    Parameters
    ----------
    func : Python function
        Python function handle
    """
    if not isinstance(func, types.FunctionType):
        raise Error("func must be a Python function handle.")

    # Also do some checking on the function prototype to make sure it is ok:
    sig = inspect.signature(func)
    if len(sig.parameters) not in [1, 2, 3]:
        raise Error(
            "The function signature for the function given to 'setObjCon' is invalid. It must be: "
            + "def objCon(funcs):, def objCon(funcs, printOK): or def objCon(funcs, printOK, passThroughFuncs):"
        )

    # Now we know the function accepts one, two, or three arguments.
    self.nUserObjConArgs = len(sig.parameters)
    self.userObjCon = func
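# Usage sketch for setObjCon (illustrative only: the `MP` instance and the
# functional names 'cd_cruise'/'cl_cruise' are assumptions, not part of the
# API above). objCon combines the functionals gathered from the processor
# sets into the final objective and constraints.
def objCon(funcs, printOK):
    fcon = {}
    fcon["obj"] = funcs["cd_cruise"]           # hypothetical drag functional
    fcon["cl_con"] = funcs["cl_cruise"] - 0.5  # hypothetical lift constraint
    if printOK:
        print("objCon inputs:", funcs)
    return fcon

MP.setObjCon(objCon)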
def surfaceSmooth(self, nIter, stepSize, surfFile=None):
    """
    Run smoothing iterations on the body surface

    Parameters
    ----------
    nIter : int
        Number of iterations to run
    stepSize : float
        Size of step. Must be < 1. Usually less than 0.1 for stability
        reasons.
    surfFile : str, optional
        IGES file defining a reference surface. If given, the smoothed
        points are reprojected onto this surface, which requires pyGeo.
    """
    if surfFile is not None:
        try:
            from pygeo import pyGeo
        except ImportError:
            raise Error(
                "pyGeo must be available to use the surface "
                "reprojection object. Try again without specifying "
                "the surfFile option."
            )

        geoSurf = pyGeo("iges", fileName=surfFile)
        self.hyp.allocatesurfaces(geoSurf.nSurf)
        for iSurf in range(geoSurf.nSurf):
            surf = geoSurf.surfs[iSurf]
            self.hyp.setsurface(iSurf + 1, surf.ku, surf.kv, surf.tu, surf.tv, surf.coef.T)

    self.hyp.smoothwrap(nIter, stepSize)
def __init__(self, initType, fileName=None, FFD=False, symmPlane=None, kmax=4, **kwargs):
    self.initType = initType
    self.FFD = False
    self.topo = None  # The topology of the volumes/surface
    self.vols = []  # The list of volumes (pySpline volume)
    self.nVol = None  # The total number of volumes
    self.coef = None  # The global (reduced) set of control pts
    self.embeddedVolumes = {}
    self.symmPlane = symmPlane

    if initType == "plot3d":
        self._readPlot3D(fileName, FFD=FFD, kmax=kmax, **kwargs)
    elif initType == "create":
        pass
    else:
        raise Error("initType must be one of 'plot3d' or 'create'. ('create' is only for expert debugging)")
def writeGrid(self, fileName=None):
    """
    Write the current grid to the correct format

    Parameters
    ----------
    fileName : str or None
        Filename for the grid. Should end in .cgns for CGNS files; for
        PLOT3D any extension may be used. It is required for CGNS and
        PLOT3D meshes. It is not required for OpenFOAM meshes, where
        this call updates the 'points' file instead.
    """
    if self.fileType in ["CGNS", "PLOT3D"]:
        if fileName is None:
            raise Error("fileName is not optional for writeGrid with gridType of CGNS or PLOT3D")

        if self.fileType == "CGNS":
            # Copy the default and then write
            if self.comm.rank == 0:
                shutil.copy(self.getOption("gridFile"), fileName)
            self.warp.writecgns(fileName)

        elif self.fileType == "PLOT3D":
            self.warp.writeplot3d(fileName)

    elif self.fileType == "OpenFOAM":
        self._writeOpenFOAMVolumePoints(self.getCommonGrid())
def evalFunctions(self, funcs, config):
    """
    Evaluate the functions this object has and place in the funcs dictionary

    Parameters
    ----------
    funcs : dict
        Dictionary to place function values
    config : str or None
        The DVGeo configuration to use when updating the coordinates
    """
    # Pull out the most recent set of coordinates:
    self.coords = self.DVGeo.update(self.name, config=config)
    self.C, self.KSC2, self.meanC2, self.maxC2 = self.calcCurvature2(
        self.coords, self.axis, self.nPts, self.eps, self.KSCoeff
    )
    if MPI.COMM_WORLD.rank == 0:
        print("Squared curvatures: KS: %f, mean: %f, max: %f" % (self.KSC2, self.meanC2, self.maxC2))

    if self.scaled:
        self.KSC2 /= self.KSC2Ref + 1e-16
        self.meanC2 /= self.meanC2Ref + 1e-16
        if MPI.COMM_WORLD.rank == 0:
            print("Normalized squared curvatures: KS: %f, mean: %f, max: %f" % (self.KSC2, self.meanC2, self.maxC2))

    if self.curvatureType == "mean":
        funcs[self.name] = self.meanC2
    elif self.curvatureType == "aggregated":
        funcs[self.name] = self.KSC2
    else:
        raise Error("curvatureType=%s not supported! Options are: mean or aggregated" % self.curvatureType)
def evalFunctions(self, funcs):
    """
    Evaluate the function this object has and place in the funcs
    dictionary. Note that this function typically will not need to be
    called, since these constraints are supplied as a linear constraint
    Jacobian and so the constraints themselves do not need to be
    re-evaluated.

    Parameters
    ----------
    funcs : dict
        Dictionary to place function values
    """
    cons = []
    for key in self.wrt:
        if key in self.DVGeo.DV_listLocal:
            cons.extend(self.jac[key].dot(self.DVGeo.DV_listLocal[key].value))
        elif key in self.DVGeo.DV_listSectionLocal:
            cons.extend(self.jac[key].dot(self.DVGeo.DV_listSectionLocal[key].value))
        elif key in self.DVGeo.DV_listSpanwiseLocal:
            cons.extend(self.jac[key].dot(self.DVGeo.DV_listSpanwiseLocal[key].value))
        else:
            raise Error(f"con {self.name} defined wrt {key}, but {key} not found in DVGeo")

    funcs[self.name] = np.array(cons).real.astype("d")
def writeCGNS(self, fileName):
    """After we have generated a grid, write it out in a properly
    formatted 1-Cell wide CGNS file suitable for running in SUmb."""

    if not self.gridGenerated:
        raise Error("No grid has been generated! Run the run() command before trying to write the grid!")

    self.hyp.writecgns(fileName)

    # Possibly perform autoconnect using cgns_utils
    if self.comm.rank == 0 and self.getOption("autoConnect"):
        error = os.system("cgns_utils connect %s" % fileName)
        if error:
            raise Error("system command 'cgns_utils connect' failed, autoconnect was NOT performed")

    self.comm.barrier()
def writePlot3D(self, fileName): """After we have generated a grid, write it out to a plot3d file for the user to look at""" if self.gridGenerated: self.hyp.writeplot3d(fileName) else: raise Error("No grid has been generated! Run the run() " "command before trying to write the grid!")
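# Workflow sketch tying the write routines together (the file names and
# option values below are illustrative assumptions, not defaults).
from pyhyp import pyHyp

options = {
    "inputFile": "surface.cgns",  # assumed surface mesh file
    "fileType": "CGNS",
    "N": 65,            # number of layers to extrude
    "s0": 1e-6,         # initial off-wall spacing
    "marchDist": 20.0,  # distance to march the grid
}

hyp = pyHyp(options=options)
hyp.run()                      # generate the grid first...
hyp.writeCGNS("volume.cgns")   # ...then write it out
hyp.writePlot3D("volume.xyz")  # or as a PLOT3D file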
def _readPlot3D(self, fileName, order="f", ku=4, kv=4, nCtlu=4, nCtlv=4):
    """Load a plot3D file and create the splines to go with each patch

    Parameters
    ----------
    fileName : str
        File name to load. Should end in .xyz
    order : str
        'f' for fortran ordering (usual), 'c' for c ordering
    ku : int
        Spline order in u
    kv : int
        Spline order in v
    nCtlu : int
        Number of control points in u
    nCtlv : int
        Number of control points in v
    """
    f = open(fileName)
    binary = False
    nSurf = geo_utils.readNValues(f, 1, "int", binary)[0]
    sizes = geo_utils.readNValues(f, nSurf * 3, "int", binary).reshape((nSurf, 3))

    # Exactly ONE of the three patch size indices must be 1
    nPts = 0
    for i in range(nSurf):
        if sizes[i, 0] == 1:  # Compress back to indices 0 and 1
            sizes[i, 0] = sizes[i, 1]
            sizes[i, 1] = sizes[i, 2]
        elif sizes[i, 1] == 1:
            sizes[i, 1] = sizes[i, 2]
        elif sizes[i, 2] == 1:
            pass
        else:
            raise Error("One of the plot3d indices must be 1")
        nPts += sizes[i, 0] * sizes[i, 1]

    surfs = []
    for i in range(nSurf):
        curSize = sizes[i, 0] * sizes[i, 1]
        surfs.append(np.zeros([sizes[i, 0], sizes[i, 1], 3]))
        for idim in range(3):
            surfs[-1][:, :, idim] = geo_utils.readNValues(f, curSize, "float", binary).reshape(
                (sizes[i, 0], sizes[i, 1]), order=order
            )
    f.close()

    # Now create a list of spline surface objects:
    self.surfs = []
    self.surfs0 = surfs
    # Note: this doesn't actually fit the surfaces...it just produces
    # the parametrization and knot vectors
    self.nSurf = nSurf
    for isurf in range(self.nSurf):
        self.surfs.append(Surface(X=surfs[isurf], ku=ku, kv=kv, nCtlu=nCtlu, nCtlv=nCtlv))
def addProcSetObjFunc(self, setName, func): """ Add an additional python function handle to compute the functionals Parameters ---------- setName : str Name of set we are setting the function for func : Python function Python function handle """ if setName in self.dummyPSet: return if setName not in self.pSet: raise Error( "setName '%s' has not been added with addProcessorSet." % setName) if not isinstance(func, types.FunctionType): raise Error("func must be a Python function handle.") self.pSet[setName].objFunc.append(func)
def setProcSetSensFunc(self, setName, func): """ Set the python function handle to compute the derivative of the functionals Parameters ---------- setName : str Name of set we are setting the function for func : Python function Python function handle """ if setName in self.dummyPSet: return if setName not in self.pSet: raise Error( "setName '%s' has not been added with addProcessorSet." % setName) if not isinstance(func, types.FunctionType): raise Error("func must be a Python function handle.") self.pSet[setName].sensFunc = [func]
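# Registration sketch for the two callbacks above (the set name 'cruise',
# the functional 'cl', and the analysis helpers are illustrative
# assumptions). The obj function returns a dictionary of functionals; the
# sens function returns their derivatives keyed by DV group.
def cruiseObj(x):
    funcs = {}
    funcs["cl"] = computeCL(x)  # hypothetical analysis call
    return funcs

def cruiseSens(x, funcs):
    funcSens = {}
    funcSens["cl"] = {"alpha": dCLdAlpha(x)}  # hypothetical derivative
    return funcSens

MP.addProcSetObjFunc("cruise", cruiseObj)
MP.setProcSetSensFunc("cruise", cruiseSens)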
def convertTo1D(value, dim1):
    """
    Generic function to process 'value'. In the end, it must be an array
    of size dim1. If value is already that shape, it is returned
    unchanged; otherwise, a scalar will be 'upcast' to that size.
    """
    if np.isscalar(value):
        return value * np.ones(dim1)
    else:
        temp = np.atleast_1d(value)
        if temp.shape[0] == dim1:
            return value
        else:
            raise Error("The size of the 1D array was the incorrect shape")
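# Behavior sketch for convertTo1D (expected results noted in comments):
a = convertTo1D(0.5, 3)              # scalar upcast -> array([0.5, 0.5, 0.5])
b = convertTo1D([1.0, 2.0, 3.0], 3)  # correct size  -> returned unchanged
# convertTo1D([1.0, 2.0], 3)         # wrong size    -> raises Error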
def addProcessorSet(self, setName, nMembers, memberSizes):
    """
    A Processor set is defined as one or more groups of processors
    that use the same obj() and sens() routines. Members of processor
    sets typically, but not necessarily, return the same number of
    functions. In all cases, the function names must be unique.

    Parameters
    ----------
    setName : str
        Name of process set. Process set names must be unique.

    nMembers : int
        Number of members in the set.

    memberSizes : int or iterable
        Number of processors for each member. If an integer is
        supplied, all members use the same number of processors.
        If a list or array is provided, a different number of
        processors for each member can be specified.

    Examples
    --------
    >>> MP = multiPointSparse.multiPoint(MPI.COMM_WORLD)
    >>> MP.addProcessorSet('cruise', 3, 32)
    >>> MP.addProcessorSet('maneuver', 2, [10, 20])

    The ``cruise`` set creates 3 processor groups, each of size 32,
    and the ``maneuver`` set creates 2 processor groups, of size 10
    and 20.
    """
    # Let the user explicitly set nMembers to 0. This is
    # equivalent to just turning off that proc set.
    if nMembers == 0:
        self.dummyPSet.add(setName)
    else:
        nMembers = int(nMembers)
        memberSizes = np.atleast_1d(memberSizes)
        if len(memberSizes) == 1:
            memberSizes = np.ones(nMembers) * memberSizes[0]
        else:
            if len(memberSizes) != nMembers:
                raise Error("The supplied memberSizes list is not the correct length.")

        self.pSet[setName] = procSet(setName, nMembers, memberSizes, len(self.pSet))
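# Follow-on sketch: after the sets are defined, the communicators are
# created and used to decide which analysis each processor runs. This
# mirrors the documented multiPointSparse workflow; treat the exact calls
# as a sketch rather than a definitive recipe.
MP.addProcessorSet("cruise", 3, 32)
comm, setComm, setFlags, groupFlags, ptID = MP.createCommunicators()

if setFlags["cruise"]:
    # This processor belongs to one of the 'cruise' groups; ptID gives
    # the index of the member group it belongs to.
    print("cruise member", ptID, "local rank", comm.rank)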
def setSurfaceCoordinates(self, coordinates):
    """Sets all surface coordinates on this processor

    Parameters
    ----------
    coordinates : numpy array, size(N, 3)
        The coordinates to set. This MUST be exactly the same size as
        the array obtained from getSurfaceCoordinates()
    """
    if len(coordinates) != self.nSurf:
        raise Error(
            "Incorrect length of coordinates supplied to "
            "setSurfaceCoordinates on proc %d. Expected "
            "array of length %d, received an array of length "
            "%d." % (self.comm.rank, self.nSurf, len(coordinates))
        )

    self.warp.setsurfacecoordinates(np.ravel(coordinates))
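# Round-trip sketch showing the intended pairing with
# getSurfaceCoordinates(); the 0.1 x-translation is an arbitrary
# illustrative deformation, and `mesh` is an assumed USMesh instance.
coords = mesh.getSurfaceCoordinates()  # (N, 3) array on this proc
coords[:, 0] += 0.1                    # shift all points in x
mesh.setSurfaceCoordinates(coords)     # must be the same shape as obtained
mesh.warpMesh()                        # propagate to the volume mesh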
def writeCGNS(self, fileName): """After we have generated a grid, write it out in a properly formatted 1-Cell wide CGNS file suitable for running in SUmb.""" if not self.gridGenerated: raise Error("No grid has been generated! Run the run() " "command before trying to write the grid!") self.hyp.writecgns(fileName) # Possibly perform autoconnect using cgns_utils if self.comm.rank == 0 and self.getOption("autoConnect"): grid = readGrid(fileName) grid.connect() grid.writeToCGNS(fileName) self.comm.barrier()
def setOptProb(self, optProb):
    """
    Set the optimization problem that this multiPoint object will be
    used for. This is required for this class to know how to assemble
    the gradients. If the optProb is not 'finished', it will be
    finalized here. Therefore, this function is collective on the comm
    that optProb is built on. multiPointSparse does *not* hold a
    reference to optProb, so no additional changes can be made to
    optProb after this function is called.

    Parameters
    ----------
    optProb : pyOptSparse optimization problem class
        The optProb object to use
    """
    optProb.finalizeDesignVariables()
    optProb.finalizeConstraints()

    # Since there is no distinction between objective(s) and
    # constraints just put everything in conKeys, including the
    # objective(s)
    for iCon in dkeys(optProb.constraints):
        if not optProb.constraints[iCon].linear:
            self.conKeys.add(iCon)
            self.outputWRT[iCon] = optProb.constraints[iCon].wrt
            self.outputSize[iCon] = optProb.constraints[iCon].ncon
    for iObj in dkeys(optProb.objectives):
        self.conKeys.add(iObj)
        self.outputWRT[iObj] = list(optProb.variables.keys())
        self.outputSize[iObj] = 1

    for dvGroup in dkeys(optProb.variables):
        ss = optProb.dvOffset[dvGroup]
        self.dvSize[dvGroup] = ss[1] - ss[0]

    self.conKeys = set(self.conKeys)

    # Check the dvsAsFuncs names to make sure they are *actually*
    # design variables and raise error
    for dv in self.dvsAsFuncs:
        if dv not in optProb.variables:
            raise Error(
                (
                    "The supplied design variable '{}' in addDVsAsFunctions() call"
                    + " does not exist in the supplied Optimization object."
                ).format(dv)
            )
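# Placement sketch: where setOptProb sits in a typical pyOptSparse setup.
# The variable/constraint names are placeholders; MP.obj is the built-in
# multiPointSparse objective wrapper.
from pyoptsparse import Optimization

optProb = Optimization("multipoint demo", MP.obj)
optProb.addVarGroup("alpha", 3, lower=-5.0, upper=5.0, value=1.0)
optProb.addConGroup("cl_con", 3, lower=0.0)
optProb.addObj("obj")

# Tell multiPointSparse about the problem so it can assemble gradients
MP.setOptProb(optProb)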
def createGroups(sizes, comm):
    """
    createGroups takes a list of sizes and creates new MPI communicators
    corresponding to those sizes. This is typically used for generating
    the communicators for an aerostructural analysis.

    Parameters
    ----------
    sizes : list or array
        List or integer array of the sizes of each split comm
    comm : MPI intracomm
        The communicator to split. comm.size must equal sum(sizes)
    """
    nGroups = len(sizes)
    nProc_total = sum(sizes)
    if not (comm.size == nProc_total):
        raise Error(
            "Cannot split comm. Comm has %d processors, but requesting to split into %d." % (comm.size, nProc_total)
        )

    # Create a cumulative size array
    cumGroups = [0] * (nGroups + 1)
    cumGroups[0] = 0
    for igroup in range(nGroups):
        cumGroups[igroup + 1] = cumGroups[igroup] + sizes[igroup]

    # Determine the member_key for each processor
    for igroup in range(nGroups):
        if comm.rank >= cumGroups[igroup] and comm.rank < cumGroups[igroup + 1]:
            member_key = igroup

    new_comm = comm.Split(member_key)

    flags = [False] * nGroups
    flags[member_key] = True

    return new_comm, flags
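# Usage sketch: split 8 ranks into a 6-proc aero group and a 2-proc
# structures group. The 6/2 split and the two solver calls are arbitrary
# illustrations (hypothetical helpers, not part of the code above).
from mpi4py import MPI

comm = MPI.COMM_WORLD  # assumed here to have exactly 8 processors
new_comm, flags = createGroups([6, 2], comm)

if flags[0]:
    runAeroAnalysis(new_comm)    # hypothetical solver call
else:
    runStructAnalysis(new_comm)  # hypothetical solver call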
def __init__(self, initType, *args, **kwargs): self.initType = initType print("pyGeo Initialization Type is: %s" % (initType)) # ------------------- pyGeo Class Attributes ----------------- self.topo = None # The topology of the surfaces self.surfs = [] # The list of surface (pySpline surf) # objects self.nSurf = None # The total number of surfaces self.coef = None # The global (reduced) set of control # points if initType == "plot3d": self._readPlot3D(*args, **kwargs) elif initType == "iges": self._readIges(*args, **kwargs) elif initType == "liftingSurface": self._init_lifting_surface(*args, **kwargs) elif initType == "create": # Don't do anything pass else: raise Error("Unknown init type. Valid Init types are 'plot3d', 'iges' and 'liftingSurface'")
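# Initialization sketch for the three public init types (file names are
# placeholders; keyword arguments are forwarded to the corresponding
# _read* routine, so pyGeo("iges", fileName=...) matches the usage shown
# in surfaceSmooth above).
geoFromPlot3d = pyGeo("plot3d", fileName="wing.xyz")
geoFromIges = pyGeo("iges", fileName="wing.igs")
# The 'liftingSurface' route builds the surface from section data instead
# of a file; see _init_lifting_surface for the accepted keyword arguments.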
def __init__(self, comm=None, options=None, debug=False):
    # Load the compiled module using MExt, allowing multiple imports
    curDir = os.path.dirname(os.path.realpath(__file__))
    self.adflow = MExt.MExt("libadflow", [curDir], debug=debug)._module

    # Information for base class:
    name = "ADFLOW"
    category = "Three Dimensional CFD"
    informs = {}

    # If 'options' is not None, go through and make sure all keys are
    # lower case. Iterate over a copy of the keys since the dictionary
    # is modified inside the loop.
    if options is not None:
        for key in list(options.keys()):
            options[key.lower()] = options.pop(key)
    else:
        raise Error(
            "The 'options' keyword argument must be passed to "
            "adflow. The options dictionary must contain (at least) "
            "the gridFile entry for the grid"
        )

    # Load all the option/objective/DV information:
    defOpts = self._getDefOptions()
    self.optionMap = self._getOptionMap()
    self.ignoreOptions, self.deprecatedOptions, self.specialOptions = self._getSpecialOptionLists()
    self.possibleAeroDVs, self.basicCostFunctions = self._getObjectivesAndDVs()

    # This is the real solver so dtype is 'd'
    self.dtype = "d"

    # Next set the MPI Communicators and associated info
    if comm is None:
        comm = MPI.COMM_WORLD

    self.comm = comm
    self.adflow.communication.adflow_comm_world = self.comm.py2f()
    self.adflow.communication.adflow_comm_self = MPI.COMM_SELF.py2f()
    self.adflow.communication.sendrequests = numpy.zeros(self.comm.size)
    self.adflow.communication.recvrequests = numpy.zeros(self.comm.size)
    self.myid = self.adflow.communication.myid = self.comm.rank
    self.adflow.communication.nproc = self.comm.size

    # Initialize the inherited aerosolver
    AeroSolver.__init__(self, name, category, defOpts, informs, options=options)

    # Initialize PETSc in case the user has not already
    self.adflow.initializepetsc()

    # Set the stand-alone adflow flag to false...this changes how
    # terminate calls are handled.
    self.adflow.iteration.standalonemode = False

    # Set the frompython flag to true... this also changes how
    # terminate calls are handled
    self.adflow.killsignals.frompython = True

    # Set default values
    self.adflow.setdefaultvalues()
    self.adflow.inputio.autoparameterupdate = False

    # Make sure all the params are ok
    for option in self.options:
        if option != "defaults":
            self.setOption(option.lower(), self.options[option][1])

    dummyAP = AeroProblem(
        name="dummy",
        mach=0.5,
        altitude=10000.0,
        areaRef=1.0,
        chordRef=1.0,
        alpha=0.0,
        degreePol=0,
        coefPol=[0, 0],
        degreeFourier=1,
        omegaFourier=6.28,
        sinCoefFourier=[0, 0],
        cosCoefFourier=[0, 0],
    )

    self.curAP = dummyAP
    self._setAeroProblemData(firstCall=True)

    # Finally complete loading
    self.adflow.dummyreadparamfile()
    self.adflow.partitionandreadgrid(False)
    self.adflow.preprocessingcustomoverset()
def sens(self, x, funcs):
    """
    This is a built-in sensitivity function that is designed to be used
    directly as the sensitivity function with pyOptSparse. The user
    should not use this function directly; instead see the class
    documentation for the intended usage.

    Parameters
    ----------
    x : dict
        Dictionary of variables returned from pyOptSparse
    funcs : dict
        Dictionary of function values from the most recent objective
        evaluation
    """
    for key in dkeys(self.pSet):
        if self.setFlags[key]:
            # Run the "sens" functions to get the functional sensitivities
            res = {"fail": False}
            for func in self.pSet[key].sensFunc:
                tmp = func(x, funcs)
                if tmp is None:
                    raise Error(
                        (
                            "No return from user supplied sensitivity function for pSet {}. "
                            + "Functional derivatives must be returned in a dictionary."
                        ).format(key)
                    )
                if "fail" in tmp:
                    res["fail"] = bool(tmp.pop("fail") or res["fail"])
                res.update(tmp)

    if self.sensCommPattern is None:
        # On the first pass we need to determine the (one-time)
        # communication pattern

        # Send all the keys
        allKeys = self.gcomm.allgather(sorted(list(res.keys())))

        self.sensCommPattern = dict()
        for i in range(len(allKeys)):  # This is looping over processors
            for key in allKeys[i]:  # This loops over keys from proc
                if key not in self.sensCommPattern:
                    if key != "fail":
                        # Only add on the lowest proc and ignore on higher ones
                        self.sensCommPattern[key] = i

    # Perform communication of functional (derivatives)
    funcSens = dict()
    for key in dkeys(self.sensCommPattern):
        if self.sensCommPattern[key] == self.gcomm.rank:
            tmp = self.gcomm.bcast(res[key], root=self.sensCommPattern[key])
        else:
            tmp = self.gcomm.bcast(None, root=self.sensCommPattern[key])
        funcSens[key] = tmp

    # Simply do an allReduce on the fail flag:
    fail = self.gcomm.allreduce(res["fail"], op=MPI.LOR)

    # Add in the sensitivity of the extra DVs as funcs...this will
    # just be an identity matrix
    for dv in self.dvsAsFuncs:
        if np.isscalar(x[dv]) or len(np.atleast_1d(x[dv])) == 1:
            funcSens[dv] = {dv: np.eye(1)}
        else:
            funcSens[dv] = {dv: np.eye(len(x[dv]))}

    # Now we have to perform the CS loop over the user-supplied objCon
    # function to generate the derivatives of our final constraints
    # (and objective(s)) with respect to the intermediate functionals.
    # We will put everything in gcon (including the objective)
    gcon = {}

    # Extract/complexify just the keys we need:
    passThroughFuncs = _extractKeys(self.funcs, self.passThroughKeys)
    cFuncs = _extractKeys(self.funcs, self.inputKeys)
    cFuncs = _complexifyFuncs(cFuncs, self.inputKeys)

    # Just copy the pass-through keys and keys that are both inputs
    # and constraints:
    for pKey in self.passThroughKeys:
        gcon[pKey] = funcSens[pKey]
    for cKey in self.consAsInputs:
        gcon[cKey] = funcSens[cKey]

    # Setup zeros for the output keys:
    for oKey in skeys(self.outputKeys):
        gcon[oKey] = {}
        # Only loop over the DV sets that this constraint has:
        for dvSet in self.outputWRT[oKey]:
            gcon[oKey][dvSet] = np.zeros((self.outputSize[oKey], self.dvSize[dvSet]))

    for iKey in skeys(self.inputKeys):  # Keys to perturb:
        if np.isscalar(cFuncs[iKey]) or len(np.atleast_1d(cFuncs[iKey])) == 1:
            cFuncs[iKey] += 1e-40j
            con = self._userObjConWrap(cFuncs, False, passThroughFuncs)
            cFuncs[iKey] -= 1e-40j

            # Extract the derivative of output key variables
            for oKey in skeys(self.outputKeys):
                n = self.outputSize[oKey]
                for dvSet in self.outputWRT[oKey]:
                    if dvSet in funcSens[iKey]:
                        deriv = (np.imag(np.atleast_1d(con[oKey])) / 1e-40).reshape((n, 1))
                        gcon[oKey][dvSet] += np.dot(deriv, np.atleast_2d(funcSens[iKey][dvSet]))
        else:
            for i in range(len(cFuncs[iKey])):
                cFuncs[iKey][i] += 1e-40j
                con = self._userObjConWrap(cFuncs, False, passThroughFuncs)
                cFuncs[iKey][i] -= 1e-40j

                # Extract the derivative of output key variables
                for oKey in skeys(self.outputKeys):
                    n = self.outputSize[oKey]
                    for dvSet in self.outputWRT[oKey]:
                        if dvSet in funcSens[iKey]:
                            deriv = (np.imag(np.atleast_1d(con[oKey])) / 1e-40).reshape((n, 1))
                            gcon[oKey][dvSet] += np.dot(deriv, np.atleast_2d(funcSens[iKey][dvSet][i, :]))

    gcon = self.gcomm.bcast(gcon, root=0)
    fail = self.gcomm.bcast(fail, root=0)

    return gcon, fail
def addVariable(self, component, group, parm, value=None, lower=None, upper=None, scale=1.0, scaledStep=True, dh=1e-6):
    """
    Add a design variable definition.

    Parameters
    ----------
    component : str
        Name of the VSP component
    group : str
        Name of the VSP group
    parm : str
        Name of the VSP parameter
    value : float or None
        The design variable. If this value is not supplied (None), then
        the current value in the VSP model will be queried and used
    lower : float or None
        Lower bound for the design variable. Use None for no lower bound
    upper : float or None
        Upper bound for the design variable. Use None for no upper bound
    scale : float
        Scale factor
    scaledStep : bool
        Flag to use a scaled step size based on the initial value of the
        variable. It will remain constant thereafter.
    dh : float
        Step size. When scaledStep is True, the actual step is dh*value.
        Otherwise this actual step is used.
    """
    container_id = openvsp.FindContainer(component, 0)
    if container_id == "":
        raise Error("Bad component for DV: %s" % component)

    parm_id = openvsp.FindParm(container_id, parm, group)
    if parm_id == "":
        raise Error(f"Bad group or parm: {component} {group} {parm}")

    # Now we know the parm_id is ok. So we just get the value
    val = openvsp.GetParmVal(parm_id)

    dvName = f"{component}:{group}:{parm}"

    if value is None:
        value = val

    if scaledStep:
        dh = dh * value

        if value == 0:
            raise Error(
                "Initial value is exactly 0. scaledStep option cannot be used. "
                "Specify an explicit dh with scaledStep=False"
            )

    self.DVs[dvName] = vspDV(parm_id, component, group, parm, value, lower, upper, scale, dh)
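# Usage sketch against a hypothetical OpenVSP model. The component, group,
# and parm names below are illustrative assumptions; they must exist in
# your VSP model, and `DVGeo` is an assumed DVGeometryVSP instance.
DVGeo.addVariable(
    component="WingGeom",
    group="XSec_1",
    parm="Root_Chord",
    lower=0.5,
    upper=2.0,
    scale=1.0,
)
# With scaledStep=True (the default), the finite-difference step used
# internally is dh times the initial value of 'Root_Chord'.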
def evalCurvAreaSens(self, iSurf):
    """
    Compute sensitivity of the integral K**2 wrt the coordinate
    locations X
    """
    # Evaluate the derivative of the position vector of every point on the
    # surface wrt the parametric coordinates u and v
    t_u = self.evalDiff(iSurf, self.X[iSurf], "u")
    Dt_uDX = self.evalDiffSens(iSurf, "u")
    t_v = self.evalDiff(iSurf, self.X[iSurf], "v")
    Dt_vDX = self.evalDiffSens(iSurf, "v")

    # Compute the normal vector by taking the cross product of t_u and t_v
    n = self.evalCross(iSurf, t_u, t_v)
    [DnDt_u, DnDt_v] = self.evalCrossSens(iSurf, t_u, t_v)
    DnDX = DnDt_u.dot(Dt_uDX) + DnDt_v.dot(Dt_vDX)

    # Compute the norm of t_u x t_v
    n_norm = self.evalNorm(iSurf, n)
    Dn_normDn = self.evalNormSens(iSurf, n)
    Dn_normDX = Dn_normDn.dot(DnDX)

    # Normalize the normal vector
    n_hat = np.zeros_like(n)
    n_hat[self.X_map[iSurf][:, :, 0]] = n[self.X_map[iSurf][:, :, 0]] / n_norm[self.node_map[iSurf][:, :]]
    n_hat[self.X_map[iSurf][:, :, 1]] = n[self.X_map[iSurf][:, :, 1]] / n_norm[self.node_map[iSurf][:, :]]
    n_hat[self.X_map[iSurf][:, :, 2]] = n[self.X_map[iSurf][:, :, 2]] / n_norm[self.node_map[iSurf][:, :]]

    ii = []
    data = []
    for i in range(3):
        # Dn_hat[self.X_map[iSurf][:,:,i]]/Dn[self.X_map[iSurf][:,:,i]]
        ii += list(np.reshape(self.X_map[iSurf][:, :, i], -1))
        data += list(np.reshape(n_norm[self.node_map[iSurf][:, :]] ** -1, -1))
    Dn_hatDn = csr_matrix((data, [ii, ii]), shape=(self.X[iSurf].size, self.X[iSurf].size))

    ii = []
    jj = []
    data = []
    for i in range(3):
        # Dn_hat[self.X_map[iSurf][:,:,i]]/Dn_norm[self.node_map[iSurf][:,:]]
        ii += list(np.reshape(self.X_map[iSurf][:, :, i], -1))
        jj += list(np.reshape(self.node_map[iSurf][:, :], -1))
        data += list(np.reshape(-n[self.X_map[iSurf][:, :, i]] / (n_norm[self.node_map[iSurf][:, :]] ** 2), -1))
    Dn_hatDn_norm = csr_matrix((data, [ii, jj]), shape=(n_hat.size, n_norm.size))

    Dn_hatDX = Dn_hatDn.dot(DnDX) + Dn_hatDn_norm.dot(Dn_normDX)

    # Evaluate the second derivatives of the position vector wrt u and v
    t_uu = self.evalDiff(iSurf, t_u, "u")
    Dt_uuDt_u = self.evalDiffSens(iSurf, "u")
    Dt_uuDX = Dt_uuDt_u.dot(Dt_uDX)

    t_vv = self.evalDiff(iSurf, t_v, "v")
    Dt_vvDt_v = self.evalDiffSens(iSurf, "v")
    Dt_vvDX = Dt_vvDt_v.dot(Dt_vDX)

    t_uv = self.evalDiff(iSurf, t_v, "u")
    Dt_uvDt_v = self.evalDiffSens(iSurf, "u")
    Dt_uvDX = Dt_uvDt_v.dot(Dt_vDX)

    # Compute the components of the first fundamental form of a
    # parametric surface
    E = self.evalInProd(iSurf, t_u, t_u)
    [DEDt_u, _] = self.evalInProdSens(iSurf, t_u, t_u)
    DEDt_u *= 2
    DEDX = DEDt_u.dot(Dt_uDX)

    F = self.evalInProd(iSurf, t_v, t_u)
    [DFDt_v, DFDt_u] = self.evalInProdSens(iSurf, t_v, t_u)
    DFDX = DFDt_v.dot(Dt_vDX) + DFDt_u.dot(Dt_uDX)

    G = self.evalInProd(iSurf, t_v, t_v)
    [DGDt_v, _] = self.evalInProdSens(iSurf, t_v, t_v)
    DGDt_v *= 2
    DGDX = DGDt_v.dot(Dt_vDX)

    # Compute the components of the second fundamental form of a
    # parametric surface
    L = self.evalInProd(iSurf, t_uu, n_hat)
    [DLDt_uu, DLDn_hat] = self.evalInProdSens(iSurf, t_uu, n_hat)
    DLDX = DLDt_uu.dot(Dt_uuDX) + DLDn_hat.dot(Dn_hatDX)

    M = self.evalInProd(iSurf, t_uv, n_hat)
    [DMDt_uv, DMDn_hat] = self.evalInProdSens(iSurf, t_uv, n_hat)
    DMDX = DMDt_uv.dot(Dt_uvDX) + DMDn_hat.dot(Dn_hatDX)

    N = self.evalInProd(iSurf, t_vv, n_hat)
    [DNDt_vv, DNDn_hat] = self.evalInProdSens(iSurf, t_vv, n_hat)
    DNDX = DNDt_vv.dot(Dt_vvDX) + DNDn_hat.dot(Dn_hatDX)

    # Compute Gaussian and mean curvature (K and H)
    K = (L * N - M * M) / (E * G - F * F)
    DKDE = self.diags(-(L * N - M * M) / (E * G - F * F) ** 2 * G)
    DKDF = self.diags((L * N - M * M) / (E * G - F * F) ** 2 * 2 * F)
    DKDG = self.diags(-(L * N - M * M) / (E * G - F * F) ** 2 * E)
    DKDL = self.diags(N / (E * G - F * F))
    DKDM = self.diags(2 * M / (E * G - F * F))
    DKDN = self.diags(L / (E * G - F * F))
    DKDX = DKDE.dot(DEDX) + DKDF.dot(DFDX) + DKDG.dot(DGDX) + DKDL.dot(DLDX) + DKDM.dot(DMDX) + DKDN.dot(DNDX)

    H = (E * N - 2 * F * M + G * L) / (2 * (E * G - F * F))
    DHDE = self.diags(N / (2 * (E * G - F * F)) - (E * N - 2 * F * M + G * L) / (2 * (E * G - F * F)) ** 2 * 2 * G)
    DHDF = self.diags(-2 * M / (2 * (E * G - F * F)) + (E * N - 2 * F * M + G * L) / (2 * (E * G - F * F)) ** 2 * 4 * F)
    DHDG = self.diags(L / (2 * (E * G - F * F)) - (E * N - 2 * F * M + G * L) / (2 * (E * G - F * F)) ** 2 * 2 * E)
    DHDL = self.diags(G / (2 * (E * G - F * F)))
    DHDM = self.diags(-2 * F / (2 * (E * G - F * F)))
    DHDN = self.diags(E / (2 * (E * G - F * F)))
    DHDX = DHDE.dot(DEDX) + DHDF.dot(DFDX) + DHDG.dot(DGDX) + DHDL.dot(DLDX) + DHDM.dot(DMDX) + DHDN.dot(DNDX)

    # Assign integration weights for each point:
    #   1 for interior nodes
    #   1/2 for edge nodes
    #   1/4 for corner nodes
    wt = np.zeros_like(n_norm) + 1
    wt[self.node_map[iSurf][0, :]] *= 0.5
    wt[self.node_map[iSurf][-1, :]] *= 0.5
    wt[self.node_map[iSurf][:, 0]] *= 0.5
    wt[self.node_map[iSurf][:, -1]] *= 0.5

    # Compute discrete area associated with each node
    dS = wt * n_norm
    DdSDX = self.diags(wt).dot(Dn_normDX)

    one = np.ones(self.node_map[iSurf].size)

    if self.curvatureType == "Gaussian":
        # Now compute integral (K**2) over S, equivalent to sum(K**2*dS)
        # kS = np.dot(one, K * K * dS)
        DkSDX = (self.diags(2 * K * dS).dot(DKDX) + self.diags(K * K).dot(DdSDX)).T.dot(one)
        return DkSDX
    elif self.curvatureType == "mean":
        # Now compute integral (H**2) over S, equivalent to sum(H**2*dS)
        # hS = np.dot(one, H * H * dS)
        DhSDX = (self.diags(2 * H * dS).dot(DHDX) + self.diags(H * H).dot(DdSDX)).T.dot(one)
        return DhSDX
    elif self.curvatureType == "combined":
        # Now compute DcSDX. Note: cS = sum( (4*H*H-2*K)*dS ), DcSDX = term1 - term2,
        # where term1 = sum( 8*H*DHDX*dS + 4*H*H*DdSdX ), term2 = sum( 2*DKDX*dS + 2*K*DdSdX )
        term1 = (self.diags(8 * H * dS).dot(DHDX) + self.diags(4 * H * H).dot(DdSDX)).T.dot(one)
        term2 = (self.diags(2 * dS).dot(DKDX) + self.diags(2 * K).dot(DdSDX)).T.dot(one)
        DcSDX = term1 - term2
        return DcSDX
    elif self.curvatureType == "KSmean":
        sigmaH = np.dot(one, np.exp(self.KSCoeff * H * H * dS))
        DhSDX = (
            self.diags(2 * H * dS / sigmaH * np.exp(self.KSCoeff * H * H * dS)).dot(DHDX)
            + self.diags(H * H / sigmaH * np.exp(self.KSCoeff * H * H * dS)).dot(DdSDX)
        ).T.dot(one)
        return DhSDX
    else:
        raise Error(
            "The curvatureType parameter should be Gaussian, mean, combined, or KSmean; "
            "%s is not supported!" % self.curvatureType
        )
def evalCurvArea(self, iSurf):
    """
    Evaluate the integral K**2 over the surface area of the wing, where
    K is the Gaussian curvature.
    """
    # Evaluate the derivative of the position vector of every point on the
    # surface wrt the parametric coordinates u and v
    t_u = self.evalDiff(iSurf, self.X[iSurf], "u")
    t_v = self.evalDiff(iSurf, self.X[iSurf], "v")

    # Compute the normal vector by taking the cross product of t_u and t_v
    n = self.evalCross(iSurf, t_u, t_v)

    # Compute the norm of t_u x t_v
    n_norm = self.evalNorm(iSurf, n)

    # Normalize the normal vector
    n_hat = np.zeros_like(n)
    n_hat[self.X_map[iSurf][:, :, 0]] = n[self.X_map[iSurf][:, :, 0]] / n_norm[self.node_map[iSurf][:, :]]
    n_hat[self.X_map[iSurf][:, :, 1]] = n[self.X_map[iSurf][:, :, 1]] / n_norm[self.node_map[iSurf][:, :]]
    n_hat[self.X_map[iSurf][:, :, 2]] = n[self.X_map[iSurf][:, :, 2]] / n_norm[self.node_map[iSurf][:, :]]

    # Evaluate the second derivatives of the position vector wrt u and v
    t_uu = self.evalDiff(iSurf, t_u, "u")
    t_vv = self.evalDiff(iSurf, t_v, "v")
    t_uv = self.evalDiff(iSurf, t_v, "u")

    # Compute the components of the first fundamental form of a
    # parametric surface
    E = self.evalInProd(iSurf, t_u, t_u)
    F = self.evalInProd(iSurf, t_v, t_u)
    G = self.evalInProd(iSurf, t_v, t_v)

    # Compute the components of the second fundamental form of a
    # parametric surface
    L = self.evalInProd(iSurf, t_uu, n_hat)
    M = self.evalInProd(iSurf, t_uv, n_hat)
    N = self.evalInProd(iSurf, t_vv, n_hat)

    # Compute Gaussian and mean curvature (K and H)
    K = (L * N - M * M) / (E * G - F * F)
    H = (E * N - 2 * F * M + G * L) / (2 * (E * G - F * F))

    # Compute the combined curvature (C)
    C = 4.0 * H * H - 2.0 * K

    # Assign integration weights for each point:
    #   1 for interior nodes
    #   1/2 for edge nodes
    #   1/4 for corner nodes
    wt = np.zeros_like(n_norm) + 1
    wt[self.node_map[iSurf][0, :]] *= 0.5
    wt[self.node_map[iSurf][-1, :]] *= 0.5
    wt[self.node_map[iSurf][:, 0]] *= 0.5
    wt[self.node_map[iSurf][:, -1]] *= 0.5

    # Compute discrete area associated with each node
    dS = wt * n_norm

    one = np.ones(self.node_map[iSurf].size)

    if self.curvatureType == "Gaussian":
        # Now compute integral (K**2) over S, equivalent to sum(K**2*dS)
        kS = np.dot(one, K * K * dS)
        return [kS, K, H, C]
    elif self.curvatureType == "mean":
        # Now compute integral (H**2) over S, equivalent to sum(H**2*dS)
        hS = np.dot(one, H * H * dS)
        return [hS, K, H, C]
    elif self.curvatureType == "combined":
        # Now compute integral C over S, equivalent to sum(C*dS)
        cS = np.dot(one, C * dS)
        return [cS, K, H, C]
    elif self.curvatureType == "KSmean":
        # Now compute the KS function for mean curvature, equivalent to KS(H*H*dS)
        sigmaH = np.dot(one, np.exp(self.KSCoeff * H * H * dS))
        KSmean = np.log(sigmaH) / self.KSCoeff
        if MPI.COMM_WORLD.rank == 0:
            print("Max curvature: ", max(H * H * dS))
        return [KSmean, K, H, C]
    else:
        raise Error(
            "The curvatureType parameter should be Gaussian, mean, combined, or KSmean; "
            "%s is not supported!" % self.curvatureType
        )
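# For reference, the standard differential-geometry relations implemented
# above, with E, F, G the first and L, M, N the second fundamental form
# coefficients:
#
#   K = (L*N - M**2) / (E*G - F**2)              (Gaussian curvature)
#   H = (E*N - 2*F*M + G*L) / (2*(E*G - F**2))   (mean curvature)
#   C = 4*H**2 - 2*K                             (combined curvature metric)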
def evalFunctionsSens(self, funcsSens, config):
    """
    Evaluate the sensitivity of the functions this object has and place
    in the funcsSens dictionary

    Parameters
    ----------
    funcsSens : dict
        Dictionary to place function sensitivities
    config : str or None
        The DVGeo configuration to use
    """
    # We need to hand-derive the derivatives of the curvature with
    # respect to the projected points
    nDV = self.DVGeo.getNDV()
    if nDV > 0:
        dC2dPt = np.zeros((self.coords.shape[0], self.coords.shape[1]))

        if self.curvatureType == "mean":
            tmp = 2 / (self.nPts - 2) / self.eps / self.eps
            for i in range(self.nPts):
                for j in range(3):
                    if i == 0:
                        dC2dPt[i][j] = self.axis[j] * tmp * (self.C[i + 1])
                    elif i == 1:
                        dC2dPt[i][j] = self.axis[j] * tmp * (-2 * self.C[i] + self.C[i + 1])
                    elif i == self.nPts - 1:
                        dC2dPt[i][j] = self.axis[j] * tmp * (self.C[i - 1])
                    elif i == self.nPts - 2:
                        dC2dPt[i][j] = self.axis[j] * tmp * (-2 * self.C[i] + self.C[i - 1])
                    else:
                        dC2dPt[i][j] = self.axis[j] * tmp * (self.C[i + 1] - 2 * self.C[i] + self.C[i - 1])
        elif self.curvatureType == "aggregated":
            eSum = 0.0
            for i in range(1, self.nPts - 1):
                eSum += np.exp(self.KSCoeff * self.C[i] * self.C[i])
            tmp = 2 / eSum / self.eps / self.eps
            for i in range(self.nPts):
                for j in range(3):
                    if i == 0:
                        dC2dPt[i][j] = (
                            self.axis[j] * tmp * (self.C[i + 1] * np.exp(self.KSCoeff * self.C[i + 1] * self.C[i + 1]))
                        )
                    elif i == 1:
                        dC2dPt[i][j] = (
                            self.axis[j]
                            * tmp
                            * (
                                -2 * self.C[i] * np.exp(self.KSCoeff * self.C[i] * self.C[i])
                                + self.C[i + 1] * np.exp(self.KSCoeff * self.C[i + 1] * self.C[i + 1])
                            )
                        )
                    elif i == self.nPts - 1:
                        dC2dPt[i][j] = (
                            self.axis[j] * tmp * (self.C[i - 1] * np.exp(self.KSCoeff * self.C[i - 1] * self.C[i - 1]))
                        )
                    elif i == self.nPts - 2:
                        dC2dPt[i][j] = (
                            self.axis[j]
                            * tmp
                            * (
                                -2 * self.C[i] * np.exp(self.KSCoeff * self.C[i] * self.C[i])
                                + self.C[i - 1] * np.exp(self.KSCoeff * self.C[i - 1] * self.C[i - 1])
                            )
                        )
                    else:
                        dC2dPt[i][j] = (
                            self.axis[j]
                            * tmp
                            * (
                                self.C[i + 1] * np.exp(self.KSCoeff * self.C[i + 1] * self.C[i + 1])
                                - 2 * self.C[i] * np.exp(self.KSCoeff * self.C[i] * self.C[i])
                                + self.C[i - 1] * np.exp(self.KSCoeff * self.C[i - 1] * self.C[i - 1])
                            )
                        )
        else:
            raise Error("curvatureType=%s not supported! Options are: mean or aggregated" % self.curvatureType)

        funcsSens[self.name] = self.DVGeo.totalSensitivity(dC2dPt, self.name, config=config)
def _setSymmetryConditions(self):
    """This function determines the symmetry planes used for the
    computation. It has a similar structure to setInternalSurface.

    Symmetry planes are specified through the 'symmetryPlanes' option, which
    has the form

    'symmetryPlanes':[[[x1,y1, z1],[dir_x1, dir_y1, dir_z1]],[[x2,y2, z2],[dir_x2, dir_y2, dir_z2]],...]

    Examples
    --------
    meshOptions = {'symmetryPlanes':[[[0.,0., 0.],[0., 1., 0.]]]}
    mesh = USMesh(options=meshOptions,comm=gcomm)
    """

    symmList = self.getOption("symmetryPlanes")
    if symmList is not None:
        # The user has explicitly supplied symmetry planes. Use those
        pts = []
        normals = []
        for i in range(len(symmList)):
            pts.append(symmList[i][0])
            normals.append(symmList[i][1])
    else:
        # Otherwise generate from the geometry.
        planes = []
        if self.fileType == "CGNS":
            if self.comm.rank == 0:
                # Do the necessary fortran preprocessing
                if self.warp.cgnsgrid.cgnsstructured:
                    self.warp.processstructuredpatches()
                else:
                    self.warp.processunstructuredpatches()

                fullConn = self.warp.cgnsgrid.surfaceconn - 1
                fullPts = self.warp.cgnsgrid.surfacepoints

                nPatch = self.warp.cgnsgrid.getnpatch()
                fullPatchNames = []
                for i in range(nPatch):
                    fullPatchNames.append(self.warp.cgnsgrid.getsurf(i + 1).strip().lower())

                symmFamilies = set()
                if self.getOption("symmetrySurfaces") is None:
                    # Use all symmetry surfaces:
                    for i in range(len(self.warp.cgnsgrid.surfaceissymm)):
                        if self.warp.cgnsgrid.surfaceissymm[i]:
                            symmFamilies.add(fullPatchNames[i].lower())
                else:
                    # The user has supplied a list of surface families
                    for name in self.getOption("symmetrySurfaces"):
                        symmFamilies.add(name.lower())

            usedFams = set()
            if self.warp.cgnsgrid.cgnsstructured:
                if self.comm.rank == 0:
                    # Pull out data and convert to 0-based ordering
                    fullPatchSizes = self.warp.cgnsgrid.surfacesizes.T

                    # Now we loop back over the "full" versions of
                    # things and just take the ones that correspond
                    # to the families we are using.
                    curNodeIndex = 0
                    curCellIndex = 0
                    curOffset = 0
                    for i in range(len(fullPatchNames)):
                        curNodeSize = fullPatchSizes[i][0] * fullPatchSizes[i][1]
                        curCellSize = (fullPatchSizes[i][0] - 1) * (fullPatchSizes[i][1] - 1)

                        if fullPatchNames[i] in symmFamilies:
                            # Keep track of the families we've actually used
                            usedFams.add(fullPatchNames[i])

                            # Determine the average normal for these points:
                            conn = fullConn[curCellIndex : curCellIndex + curCellSize * 4] - fullConn[curCellIndex] + 1
                            pts = fullPts[curNodeIndex : curNodeIndex + curNodeSize * 3]
                            avgNorm = self.warp.averagenormal(pts, conn, 4 * np.ones(curCellSize, "intc"))
                            planes.append([pts[0:3], avgNorm])
                        else:
                            # If we skipped, we increment the offset
                            curOffset += curNodeSize

                        curNodeIndex += curNodeSize * 3
                        curCellIndex += curCellSize * 4
                # end for (root proc)
            else:  # unstructured
                # We won't do this in general. The issue is that each of
                # the elements needs to be checked individually, since
                # one sym BC may have multiple actual symmetry planes.
                # This could be done, but since the plane elimination
                # code is in python and slow, we'll just defer this and
                # make the user supply the symmetry planes.
                raise Error(
                    "Automatic determination of symmetry surfaces is "
                    "not supported for unstructured CGNS. Please "
                    "specify the symmetry planes using the "
                    "'symmetryPlanes' option. See the _setSymmetryConditions()"
                    " documentation string for the option prototype."
                )

            # Check if all supplied family names were actually
            # used. The user probably wants to know if a family
            # name was specified incorrectly.
            if self.comm.rank == 0:
                if usedFams < symmFamilies:
                    missing = list(symmFamilies - usedFams)
                    warnings.warn(
                        "Not all specified symm families that "
                        "were given were found in the CGNS file. "
                        "The families not found are %s." % (repr(missing))
                    )

        elif self.fileType in ["OpenFOAM", "PLOT3D"]:
            # We could probably implement this at some point, but
            # it is not critical
            raise Error(
                "Automatic determination of symmetry surfaces is "
                "not supported for OpenFOAM or PLOT3D meshes. Please "
                "specify the symmetry planes using the "
                "'symmetryPlanes' option. See the _setSymmetryConditions()"
                " documentation string for the option prototype."
            )

        # Now we have a list of planes. We have to reduce them to the
        # set of independent planes. This is tricky since you can have
        # two different normals belonging to the same physical plane.
        # Since we don't have that many, we just use a dumb double loop.

        def checkPlane(p1, n1, p2, n2):
            # Determine if two planes defined by (pt, normal) are
            # actually the same up to a normal sign.

            # First check the normal...if these are not the same,
            # it cannot be the same plane
            if abs(np.dot(n1, n2)) < 0.99:
                return False

            # Normals are the same direction. Check if p2 is on the
            # first plane up to a tolerance.
            d = p2 - p1
            d1 = p2 - np.dot(d, n1) * n1

            if np.linalg.norm(d1 - p2) / (np.linalg.norm(d) + 1e-30) > 1e-8:
                return False

            return True

        uniquePlanes = []
        flagged = np.zeros(len(planes), "intc")
        for i in range(len(planes)):
            if not flagged[i]:
                uniquePlanes.append(planes[i])
                curPlane = planes[i]
                # Loop over remainder to check:
                for j in range(i + 1, len(planes)):
                    if checkPlane(curPlane[0], curPlane[1], planes[j][0], planes[j][1]):
                        flagged[j] = 1

        # Before we return, reset the point for each plane to be as
        # close to the origin as possible. This will help slightly
        # with the numerics.
        pts = []
        normals = []
        for i in range(len(uniquePlanes)):
            p = uniquePlanes[i][0]
            n = uniquePlanes[i][1]

            p2 = np.zeros(3)
            d = p2 - p
            pstar = p2 - np.dot(d, n) * n

            normals.append(n)
            pts.append(pstar)

    normals = self.comm.bcast(np.array(normals))
    pts = self.comm.bcast(np.array(pts))

    # Let the user know what they are:
    if self.comm.rank == 0:
        print("+-------------------- Symmetry Planes -------------------+")
        print("|           Point                       Normal           |")
        for i in range(len(pts)):
            print(
                "| (%7.3f %7.3f %7.3f)    (%7.3f %7.3f %7.3f) |"
                % (
                    np.real(pts[i][0]),
                    np.real(pts[i][1]),
                    np.real(pts[i][2]),
                    np.real(normals[i][0]),
                    np.real(normals[i][1]),
                    np.real(normals[i][2]),
                )
            )
        print("+--------------------------------------------------------+")

    # Now set the data into fortran.
    self.warp.setsymmetryplanes(pts.T, normals.T)
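# Explicit symmetry-plane specification sketch, following the prototype in
# the docstring above (the grid file name is a placeholder). Each entry is
# [point_on_plane, plane_normal]; explicit planes are required for
# unstructured CGNS, OpenFOAM, and PLOT3D meshes.
meshOptions = {
    "gridFile": "wing_vol.cgns",
    "symmetryPlanes": [
        [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]],  # y = 0 symmetry plane
    ],
}
mesh = USMesh(options=meshOptions, comm=gcomm)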
def _setInternalSurface(self):
    """This function is used by default if setSurfaceDefinition() is not
    set BEFORE an operation is requested that requires this information.
    """
    if self.warpInitialized:
        return

    if self.comm.rank == 0:
        warnings.warn(
            "Using internally generated IDWarp surfaces. If "
            "this mesh object is to be used with an "
            "external solver, ensure the mesh object is "
            "passed to the solver immediately after it is created. "
            "The external solver must then call "
            "'setExternalMeshIndices()' and 'setSurfaceDefinition()' "
            "routines."
        )

    conn = []
    pts = []
    faceSizes = []

    if self.fileType == "CGNS":
        if self.comm.rank == 0:
            # Do the necessary fortran preprocessing
            if self.warp.cgnsgrid.cgnsstructured:
                self.warp.processstructuredpatches()
            else:
                self.warp.processunstructuredpatches()

            fullConn = self.warp.cgnsgrid.surfaceconn - 1
            fullPts = self.warp.cgnsgrid.surfacepoints

            nPatch = self.warp.cgnsgrid.getnpatch()
            fullPatchNames = []
            for i in range(nPatch):
                fullPatchNames.append(self.warp.cgnsgrid.getsurf(i + 1).strip().lower())

            # We now have all surfaces belonging to boundary
            # conditions. We need to decide which ones to use
            # depending on what the user has told us.
            surfaceFamilies = set()
            if self.getOption("specifiedSurfaces") is None:
                # Use all wall surfaces:
                for i in range(len(self.warp.cgnsgrid.surfaceiswall)):
                    if self.warp.cgnsgrid.surfaceiswall[i]:
                        surfaceFamilies.add(fullPatchNames[i].lower())
            else:
                # The user has supplied a list of surface families
                for name in self.getOption("specifiedSurfaces"):
                    surfaceFamilies.add(name.lower())

        usedFams = set()
        if self.warp.cgnsgrid.cgnsstructured:
            if self.comm.rank == 0:
                # Pull out data and convert to 0-based ordering
                fullPatchSizes = self.warp.cgnsgrid.surfacesizes.T

                # Now we loop back over the "full" versions of
                # things and just take the ones that correspond
                # to the families we are using.
                curNodeIndex = 0
                curCellIndex = 0
                curOffset = 0
                for i in range(len(fullPatchNames)):
                    curNodeSize = fullPatchSizes[i][0] * fullPatchSizes[i][1]
                    curCellSize = (fullPatchSizes[i][0] - 1) * (fullPatchSizes[i][1] - 1)

                    if fullPatchNames[i] in surfaceFamilies:
                        # Keep track of the families we've actually used
                        usedFams.add(fullPatchNames[i])
                        pts.extend(fullPts[curNodeIndex : curNodeIndex + curNodeSize * 3])
                        conn.extend(fullConn[curCellIndex : curCellIndex + curCellSize * 4] - curOffset)
                    else:
                        # If we skipped, we increment the offset
                        curOffset += curNodeSize

                    curNodeIndex += curNodeSize * 3
                    curCellIndex += curCellSize * 4
            # end for (root proc)

            # Run the common surface definition routine
            pts = np.array(pts).reshape((len(pts) // 3, 3))
            faceSizes = 4 * np.ones(len(conn) // 4, "intc")
            self.setSurfaceDefinition(pts=pts, conn=np.array(conn, "intc"), faceSizes=faceSizes)

        else:  # unstructured
            if self.comm.rank == 0:
                # Pull out data and convert to 0-based ordering
                fullPtr = self.warp.cgnsgrid.surfaceptr - 1
                fullPatchPtr = self.warp.cgnsgrid.surfacepatchptr - 1
                fullFaceSizes = fullPtr[1:-1] - fullPtr[0:-2]

                # Now we loop back over the "full" versions of
                # things and just take the ones that correspond
                # to the families we are using.
                curOffset = 0
                for i in range(len(fullPatchNames)):
                    # Start/end indices into fullPtr array
                    iStart = fullPatchPtr[i]
                    iEnd = fullPatchPtr[i + 1]
                    # Start/end indices into the fullConn/fullPts array
                    iStart2 = fullPtr[iStart]
                    iEnd2 = fullPtr[iEnd]

                    if fullPatchNames[i] in surfaceFamilies:
                        # Keep track of the families we've actually used
                        usedFams.add(fullPatchNames[i])
                        faceSizes.extend(fullFaceSizes[iStart:iEnd])
                        conn.extend(fullConn[iStart2:iEnd2] - curOffset)
                        pts.extend(fullPts[iStart2 * 3 : iEnd2 * 3])
                    else:
                        # If we skipped, we increment the offset
                        curOffset += iEnd2 - iStart2

            pts = np.array(pts).reshape((len(pts) // 3, 3))

            # Run the common surface definition routine
            self.setSurfaceDefinition(pts=pts, conn=np.array(conn, "intc"), faceSizes=faceSizes)

        # Check if all supplied family names were actually
        # used. The user probably wants to know if a family
        # name was specified incorrectly.
        if self.comm.rank == 0:
            if usedFams < surfaceFamilies:
                missing = list(surfaceFamilies - usedFams)
                warnings.warn(
                    "Not all specified surface families that "
                    "were given were found in the CGNS file. "
                    "The families not found are %s." % (repr(missing))
                )
            if len(usedFams) == 0:
                raise Error(
                    "No surfaces were selected. Check the names "
                    "given in the 'specifiedSurfaces' option. The "
                    "list of families is %s." % (repr(list(fullPatchNames)))
                )

    elif self.fileType == "OpenFOAM":
        faceSizes, conn, pts = self._computeOFConn()

        # Run the "external" command
        self.setSurfaceDefinition(pts=pts, conn=conn, faceSizes=faceSizes)
def __init__(self, CGNSFile, optionsDict, comm=None, dtype="d", debug=False):
    """
    Create the MultiUSMesh object.

    Parameters
    ----------
    CGNSFile : str
        File name of the CGNS file. This CGNS file should be generated
        with cgns_utils combine, so that the domain names have the
        appropriate convention. That is, domains will have the same
        name as their original files. Domains that share the same name
        will be grouped to make an IDWarp instance.

    optionsDict : dict of dicts
        Dictionary containing dictionaries that will be used to
        initialize multiple IDWarp instances. The keys are domain names
        and the values are dictionaries of standard IDWarp options that
        will be applied to the corresponding domain. The domains of the
        full CGNS file that do not have a corresponding entry in
        optionsDict will not be warped. For instance, if the CGNS file
        has the domains wing.00000, wing.00001, and wing.00002
        associated with a wing mesh that we want to warp, then
        optionsDict should have an entry for 'wing'.

    Ney Secco 2017-02
    """

    # Check if cs was imported correctly:
    if cs is None:
        raise Error("cgns_utils could not be loaded correctly. MultiUSMesh requires cgns_utils to function.")

    # Assign communicator if we do not have one yet
    if comm is None:
        comm = MPI.COMM_WORLD

    # Check if warp has already been set by the complex version
    try:
        self.warp
    except AttributeError:
        curDir = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
        self.warp = MExt("libidwarp", curDir, debug=debug)._module

    # Store communicator
    self.comm = comm

    # Store the processor ID. (Assumed fix: the root-processor checks
    # below rely on this attribute, which was never set in the original.)
    self.myID = self.comm.Get_rank()

    # Store original file name
    self.CGNSFile = CGNSFile

    # Store scalar type
    self.dtype = dtype

    # Only the root processor will take the combined CGNS file
    # and explode it by instance.
    if self.myID == 0:
        # Initialize list to store the block IDs that belong to each
        # IDWarp instance. For example, suppose that our combined CGNS
        # file has 21 blocks:
        #   Blocks 1 to 5 belong to the fuselage
        #   Blocks 6 to 12 belong to the wing
        #   Blocks 13 to 21 belong to the background mesh
        # Then cgnsBlockIntervals = [[0,5],[5,12],[12,21]]
        self.cgnsBlockIntervals = []

        # Initialize array to store volume node CGNS intervals for each instance
        self.cgnsVolNodeIntervals = []

        # Initialize block counter
        blockCounter = 0

        # Initialize node counter
        nodeCounter = 0

        # Load the CGNS file
        combined_file = cs.readGrid(CGNSFile)

        # Explode the CGNS file by zones (this will only work if the user
        # used cgns_utils combine to create the input CGNS file, since
        # the explosion uses the domain names)
        grids, zoneNames = cs.explodeByZoneName(combined_file)

        # Save temporary grid files with the exploded zones
        for grid, zoneName in zip(grids, zoneNames):
            grid.writeToCGNS("_" + zoneName + ".cgns")

            # Store the number of blocks in each zone
            self.cgnsBlockIntervals.append([blockCounter, blockCounter + len(grid.blocks)])

            # Count the number of nodes (here, degrees of freedom, i.e. nodes*3)
            totalNodes = 0
            for blk in grid.blocks:
                totalNodes += blk.dims[0] * blk.dims[1] * blk.dims[2] * 3

            # Store the number of volume nodes in each zone
            self.cgnsVolNodeIntervals.append([nodeCounter, nodeCounter + totalNodes])

            # Update block counter
            blockCounter = blockCounter + len(grid.blocks)

            # Update node counter
            nodeCounter = nodeCounter + totalNodes

        # Delete grids to free space
        del grids
        del combined_file
    else:
        # Initialize variables to get results in the end
        zoneNames = None
        self.cgnsBlockIntervals = None
        self.cgnsVolNodeIntervals = None

    # Send information to all procs
    zoneNames = self.comm.bcast(zoneNames, root=0)
    self.cgnsBlockIntervals = self.comm.bcast(self.cgnsBlockIntervals, root=0)
    self.cgnsVolNodeIntervals = self.comm.bcast(self.cgnsVolNodeIntervals, root=0)

    # Get names for nearfield meshes.
    # The nearfield mesh names will be the keys of the options dictionary.
    nearfieldNames = optionsDict.keys()

    # Initialize list of IDWarp instances
    self.meshes = []

    # Initialize list to hold indices of the background zones
    self.backgroundInstanceIDs = []

    # Loop over all zones that we found in the combined CGNS file
    for zoneNumber, zoneName in enumerate(zoneNames):
        # Check if the zone belongs to a nearfield mesh
        if zoneName in nearfieldNames:
            # ------------------------------------------------------
            # READING NEARFIELD MESHES (The ones that will be warped)

            # Assign the name of the temporary CGNS file to the options.
            # This is the file that contains the mesh of a single component.
            # Remember that we should use the temporary grid file.
            optionsDict[zoneName]["gridFile"] = "_" + zoneName + ".cgns"

            # Initialize an IDWarp instance with the current options
            if self.dtype == "d":
                currMesh = self.warp.USMesh(options=optionsDict[zoneName], comm=self.comm)
            elif self.dtype == "D":
                currMesh = self.warp.USMesh_C(options=optionsDict[zoneName], comm=self.comm)

        else:
            # We have a background mesh

            # Regenerate the temporary filename for the background grid
            bgFile = "_" + zoneName + ".cgns"

            # ------------------------------------------------------
            # READING BACKGROUND MESHES

            # =========================================================#
            # THIS IS A MESSY (HOPEFULLY TEMPORARY) WAY OF LOADING THE
            # BACKGROUND MESH NODES. IF YOU COME UP WITH A BETTER WAY
            # TO GET volNodes, PLEASE ADD IT HERE.
            # volNodes is a flattened vector that contains the background
            # mesh volume nodes that belong to the current proc.

            # Let's try using IDWarp's CGNS loader to extract the
            # background nodes. However, we will have to trick IDWarp in
            # order to do this, since it expects a surface mesh in the
            # file. So we will make a copy of the background mesh file,
            # assign an arbitrary wall surface, and then load it with
            # IDWarp.

            # Only the root proc will modify the input file
            if self.myID == 0:
                # Make a copy of the background mesh file
                os.system("cp " + bgFile + " tmp_bg_file.cgns")

                # Create a temporary BC file
                with open("tmp_bcdata.dat", "w") as fid:
                    fid.write("1 iLow BCwall wall\n")

                # Use CGNS utils to modify the BCs
                os.system("cgns_utils overwritebc tmp_bg_file.cgns tmp_bcdata.dat")

            # Create dummy set of options just to load the CGNS file
            dummyOptions = {
                "gridFile": "tmp_bg_file.cgns",
                "warpType": "unstructured",
            }

            # Initialize an IDWarp instance with the current options
            if self.dtype == "d":
                currMesh = self.warp.USMesh(options=dummyOptions, comm=self.comm)
            elif self.dtype == "D":
                currMesh = self.warp.USMesh_C(options=dummyOptions, comm=self.comm)

            # Initialize a dummy surface in the background mesh
            if self.myID == 0:
                print("===========================================")
                print("ATTENTION: This is a dummy initialization for background mesh warping.")

            currMesh._setInternalSurface()

            if self.myID == 0:
                print("Dummy initialization is Done!")
                print("===========================================")

            # The root proc can remove the temporary files
            if self.myID == 0:
                os.system("rm tmp_bg_file.cgns")
                os.system("rm tmp_bcdata.dat")

            # Store the ID of this zone
            self.backgroundInstanceIDs.append(zoneNumber)

        # Append the instance to the list.
        # We will store even the background mesh instances for now,
        # but we will delete them as soon as we call self.setExternalMeshIndices().
        self.meshes.append(currMesh)

    # Now the root proc can remove the temporary grid files
    if self.myID == 0:
        for zoneName in zoneNames:
            os.system("rm _" + zoneName + ".cgns")

    # ------------------------------------------------------
    # Initialize other fields for completeness
    self.numSurfNodes = None  # How many solver surface nodes we have on the current proc, for all instances
    self.numVolNodes = None  # How many solver volume nodes we have on the current proc, for all instances
    self.cgnsVolNodeMasks = []  # Mask used to filter which volume nodes given by the solver belong to each instance
def __init__(self, options=None, comm=None, debug=False): """ Create the USMesh object. Parameters ---------- options : dictionary A dictionary containing the options for the mesh movement strategies. THIS IS NOT OPTIONAL. comm : MPI_INTRA_COMM MPI communication (as obtained from mpi4py) on which to create the USMesh object. If not provided, MPI_COMM_WORLD is used. debug : bool Flag specifying if the MExt import is automatically deleted. This needs to be true ONLY when a symbolic debugger is used. """ name = "IDWarp" category = "Volume mesh warping" if comm is None: comm = MPI.COMM_WORLD # Default options for mesh warping defOpts = self._getDefaultOptions() if options is None: raise Error("The 'options' keyword argument is *NOT* " "optional. An options dictionary must be passed upon " "creation of this object") # Initialize the inherited BaseSolver super().__init__(name, category, defaultOptions=defOpts, options=options, comm=comm) self.printOptions() # Check if warp has already been set if this has been # inherited to complex version try: self.warp except AttributeError: curDir = os.path.basename( os.path.dirname(os.path.realpath(__file__))) self.warp = MExt("libidwarp", curDir, debug=debug)._module # Initialize PETSc if not done so self.warp.initpetsc(self.comm.py2f()) # Set realtype of 'd'. 'D' is used in Complex and set in # UnstructuredMesh_C.py self.dtype = "d" # Set Fortran options values self._setMeshOptions() # Initialize various bits of stored information self.OFData = {} self.warpInitialized = False self.faceSizes = None self.fileType = self.getOption("fileType") fileName = self.getOption("gridFile") # Determine how to read if self.fileType == "CGNS": # Determine type of CGNS mesh we have self.warp.readcgns(fileName) elif self.fileType == "OpenFOAM": self._readOFGrid(fileName) elif self.fileType == "PLOT3D": self.warp.readplot3d(fileName)
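# Construction sketch for the USMesh object above (the grid file name is a
# placeholder; gridFile and fileType are the options consumed in __init__).
from mpi4py import MPI
from idwarp import USMesh

options = {
    "gridFile": "wing_vol.cgns",  # required: the mesh to be warped
    "fileType": "CGNS",
}
mesh = USMesh(options=options, comm=MPI.COMM_WORLD)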
def __init__(self, comm=None, options=None, debug=False): """ Create the pyHyp object. Parameters ---------- comm : MPI_INTRACOMM Comm to use. This is used when running in parallel. If not provided, MPI.COMM_WORLD is used by default. options : dict A dictionary containing the the options for pyHyp. debug : bool Flag used to specify if debugging. This only needs to be set to true when using a symbolic debugger. """ name = "pyHyp" category = "Hyperbolic mesh generator" informs = {} # Set the possible MPI Intracomm if comm is None: comm = MPI.COMM_WORLD # Default options for hyperbolic generation defOpts = self._getDefaultOptions() # Use supplied options if options is None: raise Error("The options = keyword argument is *NOT* optional. " "It must always be provided") # Initialize the inherited BaseSolver super().__init__(name, category, defaultOptions=defOpts, options=options, comm=comm, informs=informs) # Import and set the hyp module curDir = os.path.dirname(os.path.realpath(__file__)) self.hyp = MExt.MExt("hyp", [curDir], debug=debug)._module # Initialize PETSc and MPI if not already done so: self.hyp.initpetsc(self.comm.py2f()) # Set the fortan options self._setOptions() self.gridGenerated = False # Convert file type to integer fileType = {"CGNS": self.hyp.hypinput.cgnsfiletype, "PLOT3D": self.hyp.hypinput.plot3dfiletype} intFileType = fileType[self.getOption("fileType")] # Determine how we are getting data: by Input file or # explictly by patches. patchInput = False patches = self.getOption("patches") if len(patches) > 0: patchInput = True nBlocks = len(patches) if not patchInput: if not os.path.isfile(self.getOption("inputFile")): raise Error("Input file '%s' not found." % self.getOption("inputFile")) # Determine the number of blocks we have so we can initialize # the BC array: nBlocks = self.hyp.getnblocks(self.getOption("inputFile"), intFileType) self.hyp.allocatefamilies(nBlocks) if self.getOption("noPointReduce") and nBlocks > 1: raise Error("The noPointReduce option may only be true when " "a single surface grid is provided.") # The fortran BC information fBCs = numpy.zeros((4, nBlocks), order="f") fBCs[:, :] = self.hyp.hypinput.bcdefault # The python BC information BCs = self.getOption("BC") BCMap = { "splay": self.hyp.hypinput.bcsplay, "xsymm": self.hyp.hypinput.bcxsymm, "ysymm": self.hyp.hypinput.bcysymm, "zsymm": self.hyp.hypinput.bczsymm, "xconst": self.hyp.hypinput.bcxconst, "yconst": self.hyp.hypinput.bcyconst, "zconst": self.hyp.hypinput.bczconst, "xyconst": self.hyp.hypinput.bcxyconst, "yzconst": self.hyp.hypinput.bcyzconst, "xzconst": self.hyp.hypinput.bcxzconst, } edgeMap = { "ilow": self.hyp.hypinput.ilow - 1, "ihigh": self.hyp.hypinput.ihigh - 1, "jlow": self.hyp.hypinput.jlow - 1, "jhigh": self.hyp.hypinput.jhigh - 1, } helpStr = "An example of a boundary specification is: 'BC':{1:{'iLow':'ySymm'}, 2:{'jHigh':'splay'}}" for blkBC in BCs: if blkBC < 1 or blkBC > nBlocks or not isinstance(blkBC, int): raise Error( "Keys in BC array must be 1-based integers and less " "than or equal to the total number of blocks. %s" % helpStr ) for edgeKey in BCs[blkBC]: lKey = edgeKey.lower() if lKey not in edgeMap.keys(): raise Error( "Boundary edge specification must be one of: " "'iLow', 'iHigh', 'jLow', or 'jHigh'. %s" % helpStr ) BCToSet = BCs[blkBC][edgeKey].lower() if BCToSet.lower() not in BCMap.keys(): raise Error( "Boundary condition specification unknown. Must be one of: " "'splay', 'xSymm', 'ySymm', 'zSymm', " "'xConst', 'yConst', 'zConst, 'xyConst, " "'yzConst or xzConst'. 
%s" % helpStr ) fBCs[edgeMap[lKey], blkBC - 1] = BCMap[BCToSet] # Set the boundary condition information into fortran self.hyp.hypinput.bcs = fBCs # Now process the family information if we have any: families = self.getOption("families") fFamilies = [] # Set default a default name of "wall". for _i in range(nBlocks): fFamilies.append("Wall") # If we were given a CGNS file we might have families # there. So load them and overwrite the default. if intFileType == self.hyp.hypinput.cgnsfiletype: if self.comm.rank == 0: for i in range(nBlocks): family, foundFam = self.hyp.readfamily(self.getOption("inputFile"), i + 1) if foundFam and len(family.strip()) > 0: fFamilies[i] = family.strip() fFamilies = self.comm.bcast(fFamilies) # If we have explictly other families given, these will # overwrite anything we already have. if isinstance(families, str): for i in range(nBlocks): fFamilies[i] = families elif isinstance(families, dict): for blkBC in families: if blkBC < 1 or blkBC > nBlocks or not isinstance(blkBC, int): raise Error( "Keys in families dictionary must be 1-based integers and less " "than or equal to the total number of blocks" ) fFamilies[blkBC - 1] = families[blkBC] else: raise Error( "'families' option must be a string or a dictionary. A string will " "set all wall families to the single string. A dictionary with " "one-based keys for the blocks may be used to specify individual " "families for each block.\n Examples: 'families':'fuselage' or" "'families':{'1:'fuselage', 2:'wing'}." ) # Set our family information in fortran for i in range(nBlocks): self.hyp.setfamily(i + 1, fFamilies[i]) # Explicitly set patches if necessary if patchInput: self.hyp.setnumberpatches(len(patches)) for i in range(len(patches)): self.hyp.setpatch(i + 1, patches[i]) intFileType = self.hyp.hypinput.patchinput # Now run the fortran setup. self.hyp.setup(self.getOption("inputFile"), intFileType)
def __init__(self, comm=None, options=None, commonOptions=None, debug=False, skipList=[]):
    """
    The initialization method will set up, run, and write all the results.

    Parameters
    ----------
    options : object
        ORDERED dictionary or list of dictionaries.
        This contains options for the extrusion of several grids.
        An example options dictionary is given below:

        .. code-block:: python

            options = {
                "epsE": 4.0,
                "epsI": 8.0,
                "outputFile": "corner_hyp.cgns",
                "skip": False,
            }

        We can set a list of dictionaries as input:

        .. code-block:: python

            options1 = {
                "epsE": 4.0,
                "epsI": 8.0,
                "outputFile": "corner1_hyp.cgns",
                "skip": False,
            }
            options2 = "cartesian.cgns"
            options3 = {
                "epsE": 2.0,
                "epsI": 4.0,
                "outputFile": "corner2_hyp.cgns",
                "skip": False,
            }
            options = [options1, options2, options3]

        Alternatively, we can set an ORDERED dictionary of dictionaries as input:

        .. code-block:: python

            from collections import OrderedDict

            options = OrderedDict()
            options["case1"] = {
                "epsE": 4.0,
                "epsI": 8.0,
                "outputFile": "corner1_hyp.cgns",
                "skip": False,
            }
            options["block"] = "cartesian.cgns"
            options["case2"] = {
                "epsE": 2.0,
                "epsI": 4.0,
                "outputFile": "corner2_hyp.cgns",
                "skip": False,
            }

        Each element of the list/dictionary will be considered as a different
        case. One of the elements can be a string specifying a CGNS file that
        should be combined with the other grids in the end. pyHyp will not do
        anything with this file except combine it with the generated grids in
        the corresponding order. These options will overwrite the default
        options (defined in the pyHyp class) and the common options (another
        argument of this method). If the user gives a list, this will be
        converted to a dictionary with integers as keys. Remember this when
        setting the skip list for unnamed cases.

    commonOptions : dict
        Dictionary with options that should be applied to all cases in the
        options dictionary. See the 'defOpts' dictionary defined in the
        pyHyp class to see the available options.

    skipList : list
        List containing names of cases that should be skipped.
    """

    # Set the possible MPI Intracomm
    if comm is None:
        comm = MPI.COMM_WORLD
    self.comm = comm

    # Get processor ID
    myid = self.comm.Get_rank()

    # Convert input to dictionary even if user gave a single element
    if type(options) is dict:
        raise Error(
            "pyHypMulti only accepts Ordered Dictionaries or Lists as inputs."
            + " Declare your options using options=OrderedDict()"
        )
+ " Declare your options using options=OrderedDict()" ) elif type(options) is list: # Set ordered dict optionsDict = OrderedDict() # Convert list to dictionary using integers as keys optionsDict = {k: v for (k, v) in zip(range(len(options)), options[:])} else: # User gave an ordered dictionary optionsDict = deepcopy(options) # Add unused common options to each set for name in optionsDict: # Avoid options that are just strings indicating volume grids if type(optionsDict[name]) is not str: for key in commonOptions: if key not in optionsDict[name].keys(): optionsDict[name][key] = commonOptions[key] # Initilize counter index = 0 # Get the number of grids self.numGrids = len(optionsDict) # Initialize dictionary with results self.results = { "name": list(optionsDict.keys()), "outputFile": [0] * self.numGrids, "gridRatio": [0] * self.numGrids, "minQualityOverall": [0] * self.numGrids, "minVolumeOverall": [0] * self.numGrids, } # Loop over all elements in the options list for optionName in optionsDict: options = optionsDict[optionName] if type(options) is str: # Just set up relationships to combine it with the other grids # later on self.results["name"][index] = optionName self.results["outputFile"][index] = options self.results["gridRatio"][index] = "N/A" self.results["minQualityOverall"][index] = "N/A" self.results["minVolumeOverall"][index] = "N/A" # Increment counter index = index + 1 elif optionName in skipList: if myid == 0: print("Skipping case: ", optionName) # Get input file name try: inputFile = options["inputfile"] except KeyError: inputFile = options["inputFile"] # Check if output name exists or if we should get the # the automatically generated one try: outputFile = options["outputfile"] except KeyError: try: outputFile = options["outputFile"] except KeyError: # User probably did not set neither in options or common options outputFile = generateOutputName(inputFile, outputType="cgns") # Save results self.results["name"][index] = optionName self.results["outputFile"][index] = outputFile self.results["gridRatio"][index] = "skip" self.results["minQualityOverall"][index] = "skip" self.results["minVolumeOverall"][index] = "skip" # Increment counter index = index + 1 elif type(options) is dict: # Only the root processor will print if myid == 0: print("") print("") print("Running case %d : %s" % (index, options["inputFile"])) print("") # Create pyHyp object using the corresponding options hypGrid = pyHyp(comm, options, debug) # Run it hypGrid.run() # Write outputs hypGrid.writeOutput() # Save results self.results["name"][index] = optionName self.results["outputFile"][index] = hypGrid.options["outputfile"] self.results["gridRatio"][index] = float(hypGrid.hyp.hypdata.gridratio) self.results["minQualityOverall"][index] = float(hypGrid.hyp.hypdata.minqualityoverall) self.results["minVolumeOverall"][index] = float(hypGrid.hyp.hypdata.minvolumeoverall) # Delete object to free memory del hypGrid # Increment counter index = index + 1 # Print the log self.writeLog()