def calculate_representative_cell_size_v2(dataFolder, jobName):
    """
    The easiest way to calculate the area is to use e3post to generate a
    slice across the entire nozzle flow-field. The resulting data file
    contains "volume" information for each cell. For 2D grids this is
    actually the area (method tip from Rowan G. 14-01-2013)
    """
    # Save the current working directory for later use
    workingDir = os.getcwd()
    # Change into the simulation directory
    os.chdir(dataFolder)
    # We check first that the required slice file has not already been
    # created.
    if not os.path.exists(jobName+".data"):
        # e3post will require the gas model file so we search for it...
        if os.path.exists("gas-model.lua"):
            gmodelFile = 'gas-model.lua'
        else:
            gmodelFile = glob.glob("cea-lut-*.lua.gz")[0]
        run_command(E3BIN+('/e3post.py --job=%s --tindx=0001 --gmodel-file=%s ' %
                           (jobName, gmodelFile))
                    +('--output-file=%s ' % (jobName+".data"))
                    +'--slice-list=":,:,:,0" ')
    # Now read in data from the slice file
    variable_list, data = get_slice_data(jobName+".data")
    # Calculate the total area (volume) and determine the
    # representative cell size
    total_volume = sum(data['volume'])
    h = np.sqrt( total_volume/np.shape(data['volume'])[0] )
    # Change back into the working directory
    os.chdir(workingDir)
    return h
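# --------------------------------------------------------------------------
# Illustrative sketch only (not part of nenzfr): the routine above reduces to
# h = sqrt(total_area / n_cells) for a 2D slice. The helper name and the
# numbers below are hypothetical and exist purely to show the relationship.
def _example_representative_cell_size(cell_areas):
    """Return sqrt(sum(areas)/N) for a list of 2D cell areas (illustration)."""
    import numpy as np
    total_area = sum(cell_areas)
    return np.sqrt(total_area / float(len(cell_areas)))
# For four uniform cells of 1.0e-6 m^2 each, this returns 1.0e-3 m.
# --------------------------------------------------------------------------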
def perturb_CoreRadiusFraction(var, perturbedVariables,\
                               DictOfCases, levels):
    """
    Perturbation of the CoreRadiusFraction may be completed as a
    post-processing step using the nominal condition flow solution. We
    don't need to compute separate solutions.

    This function copies the nominal condition solution to a new case
    file and then runs nenzfr with the --just-stats option and the
    perturbed CoreRadiusFraction value.
    """
    for kk in range(levels):
        if kk != 0:
            caseString = 'case'+"{0:02}".format(\
                         perturbedVariables.index(var))+\
                         "{0:01}".format(kk)
            # Only if required do we proceed with copying the
            # nominal case data and calling 'nenzfr.py'
            if not os.path.exists(caseString):
                print "Perturbing "+var
                #command_text = 'rm -r '+caseString
                #run_command(command_text)
                # Copy nominal case data into a new case
                # directory
                command_text = 'cp -r case000 '+caseString
                run_command(command_text)
                # Change into new directory and call nenzfr --just-stats
                os.chdir(caseString)
                command_text = 'nenzfr.py --just-stats --CoreRadiusFraction='+\
                    str(DictOfCases[caseString][perturbedVariables.index(var)])
                run_command(command_text)
                os.chdir('../')
def set_case_running(caseString, caseDict, textString):
    """
    A short function to set a given case running in an appropriately
    named sub-directory.
    """
    print 60 * "-"
    print caseString
    print textString
    # Create sub-directory for the current case
    run_command('mkdir ./' + caseString)
    # Set up the run script for Nenzfr
    scriptFileName, cfgFileName = prepare_run_script(caseDict, \
        caseDict['jobName'].strip('"')+'_'+caseString, caseDict['Cluster'])
    # Move the run script to its sub-directory
    command_text = 'mv ' + scriptFileName + ' ./' + caseString + '/' + scriptFileName
    run_command(command_text)
    # Move the cfg file to its sub-directory too
    command_text = 'mv ' + cfgFileName + ' ./' + caseString + '/' + cfgFileName
    run_command(command_text)
    # If required, copy the equilibrium gas LUT to the sub-directory
    if caseDict['chemModel'] == 'eq':
        command_text = 'cp ./' + caseDict['gmodelFile'] + ' ./' + caseString + '/'
        run_command(command_text)
    # Change into the sub-directory, ensure the run script is executable and
    # then run it
    os.chdir(caseString)
    run_command('chmod u+x ' + scriptFileName)
    print ""
    print caseDict['runCMD'] + scriptFileName
    print ""
    # I am not sure how to replace the following line with the run_command function
    os.system(caseDict['runCMD'] + scriptFileName)
    os.chdir('../')
    return
def run_nenzfr(cfg):
    """Function that accepts a config dictionary and runs Nenzfr.
    """
    # First, check our input and assign any default values required:
    cfg = input_checker(cfg)
    # bail out here if there is an issue
    if cfg['bad_input']:
        return -2
    #
    # Get the nozzle contour file into the current work directory.
    if not os.path.exists(cfg['contourFileName']):
        run_command('cp '+E3BIN+'/nenzfr_data_files/'+cfg['contourFileName']+' .')
    # Set up the equilibrium gas-model file as a look-up table.
    if not cfg['justStats']:
        if cfg['chemModel'] in ['eq',]:
            if cfg['gasName'] in ['n2']:
                eqGasModelFile = 'cea-lut-'+upper(cfg['gasName'])+'.lua.gz'
            else:
                eqGasModelFile = 'cea-lut-'+cfg['gasName']+'.lua.gz'
            if not os.path.exists(eqGasModelFile):
                run_command('build-cea-lut.py --gas='+cfg['gasName'])
            gmodelFile = eqGasModelFile
        else:
            # We'll assume that the gas-model file of default name is set up.
            # TODO: Luke, this needs to be modified, I suspect.
            gmodelFile = 'gas-model.lua'
    #
    # If we have already run a calculation, it may be that we just want
    # to extract the exit-flow statistics again.
    if cfg['justStats']:
        gmodelFile = read_gmodelFile_from_config(cfg['jobName'])
        print_stats(cfg['exitSliceFileName'],cfg['jobName'],cfg['coreRfraction'],gmodelFile)
        return 0
    #
    # Here each facility type will be doing different things (no estcj for expansion tube)
    if cfg['facility'] == 'reflected-shock-tunnel':
        #
        # Runs estcj to get the equilibrium shock-tube conditions up to the nozzle-supply region.
        command_text = E3BIN+('/estcj.py --gas=%s --T1=%g --p1=%g --Vs=%g --pe=%g --task=st --ofn=%s' %
                              (cfg['gasName'], cfg['T1'], cfg['p1'], cfg['Vs'], cfg['pe'], cfg['jobName']))
        run_command(command_text)
    # Switch off block-sequencing flag if we have selected to run in MPI block-marching mode.
    if cfg['blockMarching']:
        sequenceBlocksFlag = 0
    else:
        sequenceBlocksFlag = 1
        cfg['nbj'] = 1
    # Set up the input script for Eilmer3.
    # The majority of the inputs will already be in our cfg dictionary,
    # but we'll add any things that are still needed.
    # add HOME and sequenceBlocksFlag
    cfg['HOME'] = '$HOME'; cfg['seq_blocks'] = sequenceBlocksFlag
    # now need to put in empty variables for the other facilities so the
    # template file works properly
    if cfg['facility'] == 'reflected-shock-tunnel':
        # need to set the expansion tube and gun tunnel parameters to nothing for the substitution
        cfg['T7'] = None; cfg['p7'] = None; cfg['V7'] = None
        cfg['T0'] = None; cfg['p0'] = None
        cfg['pitot_input_file'] = None
    elif cfg['facility'] == 'expansion-tube':
        # need to set the reflected shock tunnel and gun tunnel parameters to nothing for the substitution
        cfg['T1'] = None; cfg['p1'] = None; cfg['Vs'] = None
        cfg['pe'] = None; cfg['T0'] = None; cfg['p0'] = None
        if 'pitot_input_file' in cfg:
            cfg['T7'] = None; cfg['p7'] = None; cfg['V7'] = None
        if 'pitot_input_file' not in cfg:
            cfg['pitot_input_file'] = None
    elif cfg['facility'] == 'gun-tunnel':
        # need to set the reflected shock tunnel and expansion tube parameters to nothing for the substitution
        cfg['T1'] = None; cfg['p1'] = None; cfg['Vs'] = None; cfg['pe'] = None
        cfg['T7'] = None; cfg['p7'] = None; cfg['V7'] = None
        cfg['pitot_input_file'] = None
    # need to put an extra set of quotes around some of the strings for when
    # they are put into the eilmer 3 template
    cfg['facility'] = quote(cfg['facility'])
    cfg['contourFileName'] = quote(cfg['contourFileName'])
    cfg['gridFileName'] = quote(cfg['gridFileName'])
    cfg['chemModel'] = quote(cfg['chemModel'])
    cfg['gasName'] = quote(cfg['gasName'])
    cfg['reactionSchemeFile'] = quote(cfg['reactionSchemeFile'])
    cfg['thermalSchemeFile'] = quote(cfg['thermalSchemeFile'])
    if cfg['pitot_input_file']:
        cfg['pitot_input_file'] = quote(cfg['pitot_input_file'])
    prepare_input_script(cfg, cfg['jobName'])
    run_command(E3BIN+('/e3prep.py --job=%s --do-svg --clean-start' % (cfg['jobName'],)))
    # Run the simulation code.
    if cfg['blockMarching']:
        # Run Eilmer3 in the multi-processor block-marching mode.
        run_command(E3BIN+('/e3march.py --job=%s --run --nbj=%d' %
                           (cfg['jobName'],cfg['nbj'])))
        # Generate slice list for exit plane.
        exitPlaneSlice = '-' + str(cfg['nbj']) + ':-1,-2,:,0'
        # Generate slice list for centreline.
        noOfBlks = None
        fp = file(cfg['jobName'] + '.config')
        for line in fp:
            if 'nblock =' in line:
                tokens = line.split()
                noOfBlks = int(tokens[2])
        if noOfBlks is None:
            raise RuntimeError("Didn't manage to find the number of blocks in config file.")
        centrelineSlice = '0,:,1,0'
        for blk in range(cfg['nbj'], noOfBlks, cfg['nbj']):
            centrelineSlice += ';' + str(blk) + ',:,1,0'
    else:
        # Run Eilmer3 in the single-processor mode,
        # where the block-sequencing happens in the C++ code.
        run_command(E3BIN+('/e3shared.exe --job=%s --run' % (cfg['jobName'],)))
        # Generate slice list for exit plane and centreline.
        exitPlaneSlice = '-1,-2,:,0'
        centrelineSlice = ':,:,1,0'
    #
    # Exit plane slice
    run_command(E3BIN+('/e3post.py --job=%s --tindx=0001 --gmodel-file=%s ' %
                       (cfg['jobName'], gmodelFile))
                +('--output-file=%s ' % (cfg['exitSliceFileName'],))
                +('--slice-list="%s" ' % exitPlaneSlice)
                +'--add-mach --add-pitot --add-total-enthalpy --add-total-p')
    # Centreline slice
    run_command(E3BIN+('/e3post.py --job=%s --tindx=0001 --gmodel-file=%s ' %
                       (cfg['jobName'], gmodelFile))
                +('--output-file=%s-centreline.data ' % (cfg['jobName'],))
                +('--slice-list="%s" ' % centrelineSlice)
                +'--add-mach --add-pitot --add-total-enthalpy --add-total-p')
    # Generate files for plotting with Paraview.
    run_command(E3BIN+('/e3post.py --job=%s --vtk-xml --tindx=0001 --gmodel-file=%s ' %
                       (cfg['jobName'], gmodelFile))
                +'--add-mach --add-pitot --add-total-enthalpy --add-total-p')
    # Compute viscous data at the nozzle wall
    run_command(E3BIN+'/nenzfr_compute_viscous_data.py --job=%s --nbj=%s' %
                (cfg['jobName'], cfg['nbj']))
    # Generate averaged exit flow properties
    if not cfg['noStats']:
        print_stats(cfg['exitSliceFileName'],cfg['jobName'],cfg['coreRfraction'],gmodelFile)
    #
    return
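# --------------------------------------------------------------------------
# Illustrative sketch only: how the block-marching slice-list strings in
# run_nenzfr() above are assembled. The function below mirrors that logic but
# is a stand-alone example; the name and the sample numbers are hypothetical.
def _example_centreline_slice(nbj, noOfBlks):
    """Build the e3post slice-list string for the nozzle centreline."""
    centrelineSlice = '0,:,1,0'
    for blk in range(nbj, noOfBlks, nbj):
        centrelineSlice += ';' + str(blk) + ',:,1,0'
    return centrelineSlice
# For nbj=2 and 6 blocks this gives '0,:,1,0;2,:,1,0;4,:,1,0',
# i.e. one radial block per block-marching column along the axis.
# --------------------------------------------------------------------------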
E3BIN = os.path.expandvars("$HOME/e3bin")
sys.path.append(E3BIN)
#
# This script must be called from the following directory:
# work/space_planes_2012/sensitivity_air/
#
# Load some nenzfr data for us to use
data, Var = read_nenzfr_outfile('./case000/nozzle-exit.stats')
# Read gas model file
gmodelFile = read_gmodelFile_from_config('./case000/nozzle')
if not os.path.exists(gmodelFile):
    run_command('cp ./case000/' + gmodelFile + ' ./')
# Define forebody angle
theta_rad = 6.0 * math.pi / 180.0
# Extract a species dictionary from the input data
speciesKeys = [k for k in data.keys() if k.startswith("mass")]
speciesMassFrac = [data[k] for k in speciesKeys]
speciesDict = dict([(k.split('-')[1], v) for k, v in zip(speciesKeys, speciesMassFrac)])
# Create a Thermally Perfect Gas object
thermal = Gas_thermal(name='air5species', speciesDict=speciesDict,
                      gasModelFile=gmodelFile)
thermal.set_pT(p=data['p'], T=data['T[0]'])
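# --------------------------------------------------------------------------
# Illustrative sketch only: the species-dictionary construction above maps
# mass-fraction keys of the form 'massf[...]-SPECIES' onto {SPECIES: value}.
# The helper and the sample keys below are hypothetical, shown only to make
# the key-splitting explicit.
def _example_species_dict(data):
    """Collect mass fractions keyed by species name from a nenzfr data dict."""
    speciesKeys = [k for k in data.keys() if k.startswith("mass")]
    return dict([(k.split('-')[1], data[k]) for k in speciesKeys])
# e.g. {'massf[0]-N2': 0.767, 'massf[1]-O2': 0.233, 'p': 1000.0}
#      -> {'N2': 0.767, 'O2': 0.233}
# --------------------------------------------------------------------------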
def main(cfg={}):
    """
    Examine the command-line options to decide what to do and then
    coordinate a series of Nenzfr calculations with various inputs
    perturbed around the input nominal condition.
    """
    op = optparse.OptionParser(version=VERSION_STRING)
    op.add_option('-c', '--config_file', dest='config_file',
                  help=("filename for the config file"))
    opt, args = op.parse_args()
    config_file = opt.config_file

    if not cfg: # if the configuration dictionary has not been filled up already, load it from a file
        try: # from Rowan's onedval program
            execfile(config_file, globals(), cfg)
        except IOError as e:
            print "Error {0}".format(str(e))
            print "There was a problem reading the config file: '{0}'".format(config_file)
            print "Check that it conforms to Python syntax."
            print "Bailing out!"
            sys.exit(1)

    # check inputs using original nenzfr input checker first
    cfg['bad_input'] = False
    cfg = input_checker(cfg)

    # add default perturbations and check new nenzfr perturbed inputs
    # Set the default relative perturbation values (as percentages)
    cfg['defaultPerturbations'] = {'p1': 2.5, 'T1': 2.5, 'Vs': 2.5, 'pe': 2.5,
                                   'Tw': 2.5, 'BLTrans': 2.5, 'TurbVisRatio': 2.5,
                                   'TurbInten': 2.5, 'CoreRadiusFraction': 2.5}
    cfg = nenzfr_perturbed_input_checker(cfg)
    # bail out here if there is an issue
    if cfg['bad_input']:
        return -2

    perturbedDict = cfg['perturbedDict']
    for var in perturbedDict.keys():
        cfg[var] = perturbedDict[var][0]

    # As building an equilibrium gas LUT is so time consuming, we do it here
    # and then copy the resulting LUT into each case sub-directory. The following
    # lines are copied almost verbatim from "nenzfr.py"
    if cfg['chemModel'] in ['eq']:
        if cfg['gasName'] in ['n2']:
            eqGasModelFile = 'cea-lut-' + upper(cfg['gasName']) + '.lua.gz'
        else:
            eqGasModelFile = 'cea-lut-' + cfg['gasName'] + '.lua.gz'
        if not os.path.exists(eqGasModelFile):
            run_command('build-cea-lut.py --gas=' + cfg['gasName'])
        cfg['gmodelFile'] = eqGasModelFile

    if not cfg['createRSA']: # Perturbing for a sensitivity calculation
        # Calculate Nominal condition
        caseString = 'case' + "{0:02}".format(0) + "{0:01}".format(0)
        textString = "Nominal Condition"
        caseDict = copy.copy(cfg)
        caseDict['caseName'] = caseString
        write_case_config(caseDict)
        # Run the nominal case and write the values of the perturbed variables
        # to a summary file
        set_case_running(caseString, caseDict, textString)
        write_case_summary(cfg['perturbedVariables'], caseDict, caseString, 1)

        # Now run all the perturbed conditions
        for k in range(len(cfg['perturbedVariables'])):
            var = cfg['perturbedVariables'][k]
            perturbCount = cfg['levels']
            for kk in range(perturbCount):
                if kk != 0:
                    caseString = 'case' + "{0:02}".format(k) + "{0:01}".format(kk)
                    textString = var + " perturbed to " + str(perturbedDict[var][kk])
                    # Perturbation of the "CoreRadiusFraction" input may be done
                    # by (re)post processing the nominal case solution using the
                    # --just-stats option in nenzfr. We therefore don't need to
                    # run any separate cases for this variable. The perturbation
                    # is handled by "nenzfr_sensitivity.py".
                    caseDict = copy.copy(cfg)
                    caseDict[var] = perturbedDict[var][kk]
                    caseDict['caseName'] = caseString
                    if var != 'CoreRadiusFraction':
                        # Run the current case
                        set_case_running(caseString, caseDict, textString)
                        # Write current case to the summary file
                        write_case_summary(cfg['perturbedVariables'],caseDict,\
                                           caseString,0)
    else: # Perturbing to create a LUT
        var1 = cfg['perturbedVariables'][0] # 'Vs'
        var2 = cfg['perturbedVariables'][1] # 'pe'

        if cfg['levels'] == 2.5:
            casesToRun = [(2, 1), (1, 1), (0, 0), (2, 2), (1, 2)]
        elif cfg['levels'] == 3:
            casesToRun = [(2, 1), (0, 1), (1, 1),
                          (2, 0), (0, 0), (1, 0),
                          (2, 2), (0, 2), (1, 2)]
        elif cfg['levels'] == 5:
            casesToRun = [(4, 3), (0, 3), (3, 3), (2, 1), (1, 1),
                          (4, 0), (0, 0), (3, 0), (2, 2), (1, 2),
                          (4, 4), (0, 4), (3, 4)]
            #casesToRun = [ (2,3), (1,3),
            #       (4,1), (0,1), (3,1),
            #               (2,0), (1,0),
            #       (4,2), (0,2), (3,2),
            #               (2,4), (1,4) ]

        # Run the nominal case first
        caseString = 'case' + "{0:01}{0:01}".format(0, 0)
        caseDict = copy.copy(cfg)
        caseDict['caseName'] = caseString
        textString = "Nominal Case: "+var1+"="+str(perturbedDict[var1][0])+\
                     "; "+var2+"="+str(perturbedDict[var2][0])
        write_case_config(caseDict)
        set_case_running(caseString, caseDict, textString)
        write_case_summary(cfg['perturbedVariables'], caseDict, caseString, 1)

        # Now run all other cases
        for case in casesToRun:
            if case != (0, 0):
                caseString = 'case' + "{0:01}{1:01}".format(case[0], case[1])
                textString = var1+" perturbed to "+str(perturbedDict[var1][case[0]])+\
                             "\n"+var2+" perturbed to "+str(perturbedDict[var2][case[1]])
                caseDict = copy.copy(cfg)
                caseDict['caseName'] = caseString
                caseDict[var1] = perturbedDict[var1][case[0]]
                caseDict[var2] = perturbedDict[var2][case[1]]
                set_case_running(caseString, caseDict, textString)
                write_case_summary(cfg['perturbedVariables'], caseDict, caseString, 0)
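# --------------------------------------------------------------------------
# Illustrative sketch only: the 'caseXXk' directory names used in the
# sensitivity branch above encode (index of the perturbed variable,
# perturbation level). The helper below is hypothetical and simply restates
# that convention.
def _example_case_name(varIndex, level):
    """Return the sub-directory name for a perturbed case, e.g. (3, 1) -> 'case031'."""
    return 'case' + "{0:02}".format(varIndex) + "{0:01}".format(level)
# 'case000' is always the nominal condition; 'case031' would be the fourth
# perturbed variable (index 3) at its first perturbation level.
# --------------------------------------------------------------------------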
def objective_function(y):
    """
    Objective function for the design of the internal contour of
    a T4 nozzle.

    The input y is a list of coordinates of the Bezier control points
    for the nozzle. The radial coordinates are specified relative to
    the radial coordinate of the upstream point.
    """
    nFixedPts = 2  # Number of fixed Bezier control points

    # Targets
    M_target = 7.0        # Target Mach number
    dtheta_target = 0.02  # Target variation in outflow angle (in degrees)
    dM_target = 0.01      # Target variation in Mach number

    # Flag for the inclusion of a secondary penalty function in our optimisation.
    # Switch this on if your optimised nozzle contour always ends with a negative
    # gradient (i.e. the nozzle curves towards its axis near the exit of the
    # contour). This secondary penalty function should limit this inward-turning
    # phenomenon.
    include_penalty_function = 0

    # Read in the x-coordinates for all points and the y-coordinates for
    # the first nFixedPts points - they are fixed and not changed in the
    # optimisation run.
    x_fixed = []; y_fixed = []
    fi = open(basename + '.initial.data', 'r')
    fi.readline()  # Skip the first line
    while True:
        buf = fi.readline().strip()
        if len(buf) == 0: break
        tokens = [float(word) for word in buf.split()]
        x_fixed.append(tokens[0])
        y_fixed.append(tokens[1])
    fi.close()

    # Use x and y to create new data file containing Bezier control
    # points to generate the internal contour of the nozzle.
    fo = open(basename + '.data', 'w')
    fo.write('# x, m y, m \n')
    for indx in range(nFixedPts):
        fo.write('%.7f %.7f \n' % (x_fixed[indx], y_fixed[indx]))
    y_absolute = y_fixed[nFixedPts - 1]
    for indx in range(nFixedPts, len(y) + nFixedPts):
        y_absolute += y[indx - nFixedPts]
        fo.write('%.7f %.7f \n' % (x_fixed[indx], y_absolute))
    fo.close()

    # Run nenzfr with the given nozzle contour.
    run_command('./run-nenzfr.sh')

    # Read in an extracted slice of data at the exit of the nozzle
    fi = open('nozzle-exit.data', 'r')
    # Keep a list of variables in order of appearance (from nenzfr.py).
    varLine = fi.readline().strip()
    items = varLine.split()
    if items[0] == '#': del items[0]
    if items[0] == 'Variables:': del items[0]
    variable_list = [item.split(':')[1] for item in items]
    # Store the data in lists against these names (from nenzfr.py).
    data = {}
    for var in variable_list:
        data[var] = []
    for line in fi.readlines():
        items = line.strip().split()
        if items[0] == '#': continue
        assert len(items) == len(variable_list)
        for i in range(len(items)):
            data[variable_list[i]].append(float(items[i]))
    fi.close()

    # Secondary functions that contribute to the objective function.
    f_theta = 0.0; f_M = 0.0  # Initialise both functions to zero first.
    N = 0  # Initialise the counter for the number of cells in the core flow.
    for j in range(len(data['pos.y'])):
        # Definition used by Chris Craddock to estimate the boundary layer edge
        if j == 0:
            dMdy = 0.0  # Set to some number so that the first point
                        # is not set as the boundary layer edge.
        else:
            dMdy = (data['M_local'][j] - data['M_local'][j-1]) /\
                   (data['pos.y'][j] - data['pos.y'][j-1])
        # If dMdy >= -20.0, then we are in the core flow.
        if dMdy >= -20.0:
            f_theta += (data['vel.y'][j] / data['vel.x'][j])**2
            f_M += (data['M_local'][j] - M_target)**2
            N += 1

    # Weighting parameters.
    phi_theta = 1.0 / tan(radians(dtheta_target))
    phi_M = 1.0 / dM_target
    # Weight the secondary functions by weighting parameters.
    f_theta = phi_theta**2 / N * f_theta
    f_M = phi_M**2 / N * f_M

    # Secondary penalty function
    if include_penalty_function == 1:
        # Whenever the Bezier control points start going towards the nozzle axis,
        # impose a really large penalty. Note though that this means that the
        # optimiser might never reach its target objective of 1.0.
        if min(y) < 0.0:
            f_penalty = 1e9
        else:
            f_penalty = 0.0

    # Objective function
    if include_penalty_function == 1:
        obj_funct = (f_theta + f_M + f_penalty)**2
    else:
        obj_funct = (f_theta + f_M)**2

    # Clean data from current nenzfr run before starting the next run.
    run_command('./clean-data-from-nenzfr-run.sh')

    return obj_funct
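# --------------------------------------------------------------------------
# Illustrative sketch only: one way an external optimiser could drive
# objective_function(). The actual optimisation driver is not shown in this
# file; the use of scipy's Nelder-Mead routine and the tolerances below are
# assumptions made purely for illustration.
def _example_optimise_contour(y_start):
    """Minimise objective_function starting from the relative y-increments y_start."""
    from scipy.optimize import fmin
    # Each evaluation writes a new Bezier control-point file, runs nenzfr and
    # scores the exit flow, so expect every iteration to be expensive.
    y_opt = fmin(objective_function, y_start, xtol=1.0e-5, ftol=1.0e-4, maxiter=200)
    return y_opt
# --------------------------------------------------------------------------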
def main():
    """
    Examine the command-line options to decide what to do
    and then coordinate the calculations done by Nenzfr.
    """
    op = optparse.OptionParser(version=VERSION_STRING)
    op.add_option('--runCMD', dest='runCMD', default='./',
                  choices=['./', 'qsub '],
                  help=("command used to execute the run-script file "
                        "[default: %default]"))
    op.add_option('--Cluster', dest='Cluster', default='Mango',
                  choices=['Mango', 'Barrine'],
                  help=("specify on which cluster the computations are to be run. "
                        "This is used to define which run template script will "
                        "be used. Options: "
                        "Mango; Barrine [default: %default]"))
    op.add_option('--tstart', dest='tstart', type='float', default=1.5e-3,
                  help=("time at which the first slice of the input pressure "
                        "trace is to begin [default: %default]"))
    op.add_option('--nsteps', dest='nsteps', type='int', default=5,
                  help=("number of slices to use [default: %default]"))
    op.add_option('--dt', dest='dt', type='float', default=0.5e-3,
                  help=("width of each averaging slice [default: %default]"))
    op.add_option('--pefile', dest='peFileName', default=None,
                  help="file specifying the transient equilibrium pressure (in Pa) "
                       "[default: %default]")
    op.add_option('--job', dest='jobName', default='nozzle',
                  help="base name for Eilmer3 files [default: %default]")

    # Multitude of options required by nenzfr.
    op.add_option('--gas', dest='gasName', default='air',
                  choices=['air', 'air5species', 'n2', 'co2', 'h2ne'],
                  help=("name of gas model: "
                        "air; "
                        "air5species; "
                        "n2; "
                        "co2; "
                        "h2ne"))
    op.add_option('--p1', dest='p1', type='float', default=None,
                  help=("shock tube fill pressure or static pressure, in Pa"))
    op.add_option('--T1', dest='T1', type='float', default=None,
                  help=("shock tube fill temperature, in degrees K"))
    op.add_option('--Vs', dest='Vs', type='float', default=None,
                  help=("incident shock speed, in m/s"))
    op.add_option('--chem', dest='chemModel', default='eq',
                  choices=['eq', 'neq', 'frz', 'frz2'],
                  help=("chemistry model: "
                        "eq=equilibrium; "
                        "neq=non-equilibrium; "
                        "frz=frozen "
                        "[default: %default]"))
    op.add_option('--area', dest='areaRatio', default=1581.165,
                  help=("nozzle area ratio. only used for estcj calc. "
                        "use when --cfile(--gfile) are "
                        "specified. [default: %default]"))
    op.add_option('--cfile', dest='contourFileName',
                  default='Bezier-control-pts-t4-m10.data',
                  help="file containing Bezier control points "
                       "for nozzle contour [default: %default]")
    op.add_option('--gfile', dest='gridFileName', default='None',
                  help="file containing nozzle grid. "
                       "overrides --cfile if both are given "
                       "[default: %default]")
    op.add_option('--exitfile', dest='exitSliceFileName',
                  default='nozzle-exit.data',
                  help="file for holding the nozzle-exit data [default: %default]")
    op.add_option('--block-marching', dest='blockMarching',
                  action='store_true', default=False,
                  help="run nenzfr in block-marching mode")
    # The following defaults suit a Mach 10 Nozzle calculation.
    op.add_option('--nni', dest='nni', type='int', default=1800,
                  help=("number of axial cells [default: %default]"))
    op.add_option('--nnj', dest='nnj', type='int', default=100,
                  help=("number of radial cells [default: %default]"))
    op.add_option('--nbi', dest='nbi', type='int', default=180,
                  help=("number of axial blocks for the divergence section (nozzle_blk) "
                        "[default: %default]"))
    op.add_option('--nbj', dest='nbj', type='int', default=1,
                  help=("number of radial blocks [default: %default]"))
    op.add_option('--bx', dest='bx', type='float', default=1.05,
                  help=("clustering in the axial direction [default: %default]"))
    op.add_option('--by', dest='by', type='float', default=1.002,
                  help=("clustering in the radial direction [default: %default]"))
    op.add_option('--max-time', dest='max_time', type='float', default=6.0e-3,
                  help=("overall simulation time for nozzle flow [default: %default]"))
    op.add_option('--max-step', dest='max_step', type='int', default=800000,
                  help=("maximum simulation steps allowed [default: %default]"))
    op.add_option('--Twall', dest='Tw', type='float', default=300.0,
                  help=("Nozzle wall temperature, in K "
                        "[default: %default]"))
    op.add_option('--BLTrans', dest='BLTrans', default="x_c[-1]*1.1",
                  help=("Transition location for the boundary layer. Used "
                        "to define the turbulent portion of the nozzle. "
                        "[default: > nozzle length, i.e. laminar nozzle]"))
    op.add_option('--TurbVisRatio', dest='TurbVisRatio', type='float',
                  default=100.0,
                  help=("Turbulent-to-laminar viscosity ratio "
                        "[default: %default]"))
    op.add_option('--TurbIntensity', dest='TurbInten', type='float',
                  default=0.05,
                  help=("Turbulence intensity at the throat "
                        "[default: %default]"))
    op.add_option('--CoreRadiusFraction', dest="coreRfraction", type='float',
                  default=2.0 / 3.0,
                  help=("Radius of core flow as a fraction of "
                        "the nozzle exit radius [default: %default]"))
    opt, args = op.parse_args()

    # Go ahead with a new calculation.
    # First, make sure that we have the needed parameters.
    bad_input = False
    if opt.p1 is None:
        print "Need to supply a float value for p1."
        bad_input = True
    if opt.T1 is None:
        print "Need to supply a float value for T1."
        bad_input = True
    if opt.Vs is None:
        print "Need to supply a float value for Vs."
        bad_input = True
    if opt.peFileName is None:
        print "Need to supply a string value for pefile."
        bad_input = True
    if opt.blockMarching is True:
        opt.blockMarching = "--block-marching"
    else:
        opt.blockMarching = ""
    if bad_input:
        return -2

    # Calculate the averaged equilibrium pressure for each
    # time interval...
    pe = calculate_pe_values(opt.peFileName, opt.tstart, opt.nsteps, opt.dt)

    # Set up the run script for Nenzfr.
    paramDict = {'jobName': quote(opt.jobName), 'gasName': quote(opt.gasName),
                 'T1': opt.T1, 'p1': opt.p1, 'Vs': opt.Vs,
                 'chemModel': quote(opt.chemModel),
                 'contourFileName': quote(opt.contourFileName),
                 'gridFileName': quote(opt.gridFileName),
                 'exitSliceFileName': quote(opt.exitSliceFileName),
                 'areaRatio': opt.areaRatio, 'blockMarching': opt.blockMarching,
                 'nni': opt.nni, 'nnj': opt.nnj, 'nbi': opt.nbi, 'nbj': opt.nbj,
                 'bx': opt.bx, 'by': opt.by,
                 'max_time': opt.max_time, 'max_step': opt.max_step,
                 'Tw': opt.Tw, 'TurbVisRatio': opt.TurbVisRatio,
                 'TurbInten': opt.TurbInten, 'BLTrans': opt.BLTrans,
                 'CoreRadiusFraction': opt.coreRfraction}

    # As building an equilibrium gas LUT is so time consuming, we do it here
    # and then copy the resulting LUT into each case sub-directory. The following
    # lines are copied almost verbatim from "nenzfr.py"
    if opt.chemModel in ['eq']:
        if opt.gasName in ['n2']:
            eqGasModelFile = 'cea-lut-' + upper(opt.gasName) + '.lua.gz'
        else:
            eqGasModelFile = 'cea-lut-' + opt.gasName + '.lua.gz'
        if not os.path.exists(eqGasModelFile):
            run_command('build-cea-lut.py --gas=' + opt.gasName)
        paramDict['gmodelFile'] = eqGasModelFile

    for k in range(len(pe)):
        # Create sub-directory for the current case.
        caseString = 'case' + "{0:03}".format(k)
        paramDict['caseName'] = caseString
        print 60 * "-"
        print caseString
        print "tstart= %f; pe[k]= %f" % (opt.tstart + k * opt.dt, pe[k])
        run_command('mkdir ./' + caseString)
        # Set up the run script for Nenzfr
        paramDict['pe'] = pe[k]
        scriptFileName = prepare_run_script(paramDict,
                                            opt.jobName + '_' + caseString,
                                            opt.Cluster)
        # Move the run script to its sub-directory
        command_text = 'mv ' + scriptFileName + ' ./' + caseString + '/' + scriptFileName
        run_command(command_text)
        # If required, copy the equilibrium gas LUT to the sub-directory
        if paramDict['chemModel'] in ['"eq"',]:
            command_text = 'cp ' + paramDict['gmodelFile'] + ' ./' + caseString + '/'
            run_command(command_text)
        # Change into the sub-directory, ensure the run script is executable and
        # then run it
        os.chdir(caseString)
        run_command('chmod u+x ' + scriptFileName)
        print ""
        print opt.runCMD + scriptFileName
        print ""
        # I am not sure how to replace the next line with the run_command function
        os.system(opt.runCMD + scriptFileName)
        os.chdir('../')
    return 0
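# --------------------------------------------------------------------------
# Illustrative sketch only: calculate_pe_values() is defined elsewhere in
# nenzfr. The helper below shows the slice-averaging idea implied by the
# --tstart/--nsteps/--dt options (average the supply-pressure trace over
# consecutive windows of width dt); it is an assumption for illustration,
# not the actual implementation.
def _example_average_pe(times, pressures, tstart, nsteps, dt):
    """Return a list of nsteps window-averaged pressures from a (t, p) trace."""
    pe = []
    for k in range(nsteps):
        t0 = tstart + k * dt
        t1 = t0 + dt
        window = [p for (t, p) in zip(times, pressures) if t0 <= t < t1]
        pe.append(sum(window) / float(len(window)))
    return pe
# --------------------------------------------------------------------------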
def main():
    """
    Examine the command-line options, load the necessary data and then
    calculate the Grid Convergence Error for each nozzle exit flow property.
    """
    op = optparse.OptionParser(version=VERSION_STRING)
    op.add_option('--run-defaults', dest='runDefaults', action='store_true',
                  default=True,
                  help="calculate GCI using defaults for all other inputs.")
    op.add_option('--job', dest='jobName', default='nozzle',
                  help="base name for Eilmer3 files [default: %default]")
    op.add_option('--caseListFile', dest='caseListFile', default='case_list.txt',
                  help=("the name of a text file containing a list of relative pathnames "
                        "for the simulations whose grid error is to be calculated. Files "
                        "should be listed from finest grid to coarsest grid. Comment "
                        "lines should begin with a #. [default: %default]"))
    op.add_option('--casesToFitFile', dest='caseFitList', default='case_list.txt',
                  help=("the name of a text file containing a list of relative pathnames "
                        "for simulations that will be used to generate a line-of-best"
                        "-fit. Only applicable when --method='linear-fit'. Also, the "
                        "list MUST be a sub-set of the files given in --caseListFile. "
                        "[default: %default]"))
    op.add_option('--safety-factor', dest='safetyFactor', type='float', default=1.0,
                  help=("specify a safety-factor to be applied to the calculated "
                        "convergence error [default: %default]"))
    op.add_option('--method', dest='method', choices=['linear-fit','ref-solution'],
                  default='linear-fit',
                  help=("specify the calculation method. "
                        "Choices are (1) 'linear-fit' : fit a straight line and extrapolate to "
                        "cell size = zero, or (2) 'ref-solution' : consider the solution on "
                        "the finest grid as the 'truth'. [default: %default]"))
    op.add_option('--generate-plots', dest='generatePlots', action='store_true',
                  default=False,
                  help=("generate plots showing the convergence trends "
                        "for each freestream property [default: %default]"))
    opt, args = op.parse_args()

    # Read the case list file
    case_list = read_caseList_file(opt.caseListFile)

    # If required, read in the list of cases that will be used to
    # generate a line-of-best-fit.
    if opt.method in ['linear-fit',]:
        if opt.caseFitList not in ['case_list.txt',]:
            case_fit_list = read_caseList_file(opt.caseFitList)
            print "Linear model will be fitted using simulations given in ", opt.caseFitList
        else:
            case_fit_list = copy.copy(case_list)
            print "Linear model will be fitted using simulations given in ", opt.caseListFile
        if len(case_fit_list) == 1:
            print "ERROR: case_fit_list contains only one entry."
            print "       This is not enough to generate a linear fit."
            print "       Check input --casesToFitFile"
            return -2
        if len(case_fit_list) == 2:
            print "WARNING: case_fit_list contains only two entries."
            print "         Fitted line may not be robust."
    else:
        print "Using ", case_list[0], " as the reference ('truth') solution"

    # Read in nozzle exit flow data
    print "Reading exit flow files..."
    FlowData = {}
    exitProperty = {}
    for case in case_list:
        data, props = read_nenzfr_outfile(case+opt.jobName+"-exit.stats")
        # Add dynamic pressure, unit Reynolds number, mass flux and
        # static-to-dynamic pressure ratio
        data, props = add_extra_variables(data, props)
        FlowData[case] = data
        exitProperty[case] = props

    # Calculate the cell size for each simulation and write a summary
    # file. If a summary file already exists we read that rather than
    # re-calculate the cell size.
    if not os.path.exists("cell_sizes.dat"):
        print "Calculating representative cell size..."
        # Initialise cellsize dictionary and x array...
        cellsize = {}
        x = np.array([])
        # Begin writing a summary file
        fp = open("cell_sizes.dat","w")
        fp.write("# Representative Cell Sizes\n")
        fp.write("# Columns are:\n")
        fp.write("# case, cell-size (metre)\n")
        # Loop through each case, calculate cell size and its
        # reciprocal (x = 1/h) and write data to the summary file
        for case in case_list:
            h = calculate_representative_cell_size_v2(case, opt.jobName)
            cellsize[case] = h
            #x = np.append(x, h)
            #fp.write("{0:s}\t{1:1.10e}\t{2:1.10e}\n".format(case,h,1.0/h))
            fp.write("{0:s}\t{1:1.10e}\n".format(case,h))
            print case, ": ", h
        fp.close()
    else:
        print "Reading cell size data from 'cell_sizes.dat'..."
        fp = open("cell_sizes.dat","r")
        contents = fp.readlines()
        fp.close()
        cellsize = {}
        x = np.array([])
        for line in contents:
            if not line.startswith("#"):
                lineData = line.strip().split("\t")
                cellsize[lineData[0]] = np.float(lineData[1])
                #x = np.append(x, np.float(lineData[2]))
                print lineData[0], ": ", lineData[1]

    # Now we set about calculating the grid convergence error for
    # each exit-flow property using the desired method
    print "Calculating grid errors for each flow property..."
    linear_fit_params = {}
    grid_errors = {}
    true_value = {}
    for exitVar in exitProperty[case_list[0]]:
        grid_errors[exitVar] = {}

        if opt.method in ['linear-fit',]:
            # Assemble "x" and "y" vectors
            y = np.array([])
            x = np.array([])
            for case in case_fit_list:
                y = np.append(y, FlowData[case][exitVar])
                x = np.append(x, cellsize[case])
            # Straight-line fit through the data
            slope, intercept, r_value, p_value, std_err = linregress(x,y)
            # Store the parameters of the fitted curve so we may
            # write them to a file later
            linear_fit_params[exitVar] = {'slope':slope,'intercept':intercept,\
                                          'r_value':r_value,'p_value':p_value,\
                                          'std_err':std_err}
            # Store the "true" value for later use
            true_value[exitVar] = intercept
            # Error relative to the linear fit intercept
            for case in case_list:
                if intercept != 0.:
                    grid_errors[exitVar][case] = \
                        (FlowData[case][exitVar] - intercept)/intercept *\
                        100.0 * opt.safetyFactor
                else:
                    grid_errors[exitVar][case] = float('NaN')

        elif opt.method in ['ref-solution',]:
            finest = case_list[0]
            true_value[exitVar] = FlowData[finest][exitVar]
            # Error relative to the finest grid solution
            for case in case_list:
                if true_value[exitVar] != 0.:
                    grid_errors[exitVar][case] = \
                        (FlowData[case][exitVar] - true_value[exitVar])/\
                        true_value[exitVar] * 100.0 * opt.safetyFactor
                else:
                    grid_errors[exitVar][case] = float('NaN')

        elif opt.method in ['roacheGCI']:
            print "Calculation method roacheGCI has not yet been implemented"
            continue
            # Below are my initial attempts at implementing Roache's GCI
            # method. Perhaps when I have time I'll revisit this and
            # complete the implementation with necessary checks...
            #
            #
            ## Check that the grids follow the recommendation
            #assert h3/h1 > 1.3
            #print "h1=",h1
            #print "h2=",h2
            #print "h3=",h3
            #print "h3/h1=",h3/h1
            ## Calculate ratios of grid cell sizes
            #r21 = h2/h1
            #r32 = h3/h1
            #
            #exitVar = 'p'
            #epsilon32 = coarseGridFlowData[exitVar] - mediumGridFlowData[exitVar]
            #epsilon21 = mediumGridFlowData[exitVar] - fineGridFlowData[exitVar]
            #
            #s = math.copysign(1, epsilon32/epsilon21)
            #
            #print "s=",s
            #print "epsilon32=",epsilon32
            #print "epsilon21=",epsilon21
            #
            #p_old = 1.0 #1/np.log(r21) * np.abs( 2*np.log( np.abs(epsilon32/epsilon21) ) )
            #
            #q_new = np.log( (r21**p_old-s)/(r32**p_old-s) )
            #p_new = 1/np.log(r21) * np.abs( np.log( np.abs(epsilon32/epsilon21) ) + q_new )
            #
            #count = 0
            ##while np.abs(p_new - p_old) > 1.0e-5: #and count < 100:
            ##    count += 1
            ##    p_old = p_new
            ##    q_new = np.log( (r21**p_old-s)/(r32**p_old-s) )
            ##    p_new = 1/np.log(r21) * np.abs( np.log( np.abs(epsilon32/epsilon21) ) + q_new )
            ##    #print np.abs(p_new-p_old)
            ##    #print count
            #
            ##def error_in_p(x, e32=epsilon32, e21=epsilon21, r32=r32, r21=r21, s=s):
            #
            ##p = secant(error_in_p, 1.1, 1.5, tol=1.0e-4,\
            ##           limits=[0,10])
            #
            #exitVar = 'p'
            #epsilon32 = coarseGridFlowData[exitVar] - mediumGridFlowData[exitVar]
            #epsilon21 = mediumGridFlowData[exitVar] - fineGridFlowData[exitVar]

    # Now write out some summary files
    print "Writing data files..."
    if opt.method in ['linear-fit']:
        # For the linear-fit method we write a file containing
        # the regression results
        fp = open('linear-fit-regression-params.dat','w')
        fp.write('# property: slope: intercept: r_value: p_value: std_err\n')
        for exitVar in exitProperty[case_list[0]]:
            fp.write('{0:>12s} '.format(exitVar))
            fp.write('{0:>12g} '.format(linear_fit_params[exitVar]['slope']))
            fp.write('{0:>12g} '.format(linear_fit_params[exitVar]['intercept']))
            fp.write('{0:>9g} '.format(linear_fit_params[exitVar]['r_value']))
            fp.write('{0:>10g} '.format(linear_fit_params[exitVar]['p_value']))
            fp.write('{0:>12g}\n'.format(linear_fit_params[exitVar]['std_err']))
        fp.close()

    # Summary of the errors
    fp = open("grid-convergence-errors-"+opt.method+".dat",'w')
    fp.write('# percentage errors relative to the "true-value"\n')
    fp.write('# safety-factor: {0:1.5f}\n'.format(opt.safetyFactor))
    fp.write('# property: "true-value": ')
    for case in case_list:
        if case not in [case_list[-1],]:
            fp.write('{0:s}: '.format(case))
        else:
            fp.write('{0:s}\n'.format(case))
    for exitVar in exitProperty[case_list[0]]:
        fp.write('{0:>12s} '.format(exitVar))
        fp.write('{0:>12g} '.format(true_value[exitVar]))
        for case in case_list:
            if case not in [case_list[-1],]:
                fp.write('{0:>10.4f} '.format(grid_errors[exitVar][case]))
            else:
                fp.write('{0:>10.4f}\n'.format(grid_errors[exitVar][case]))
    fp.close()

    # If desired we set about generating plots. Ideally we would use
    # matplotlib; however, I couldn't get it to print figures directly
    # to pdf on mango, so I decided to use gnuplot. Some more notes
    # are given below.
    if opt.generatePlots is True:
        # Create a directory in which to save the figures
        if not os.path.exists("./figs/"):
            print "Creating figure directory..."
            run_command('mkdir figs')

        print "Creating plots (via gnuplot)..."
        # Data file for use with gnuplot
        fp1 = open('data.dat','w')
        for case in case_list:
            fp1.write('{0:s}\t'.format(case))
            fp1.write('{0:g}\t'.format(cellsize[case]))
            for exitVar in exitProperty[case_list[0]]:
                fp1.write('{0:g}\t'.format(FlowData[case][exitVar]))
            fp1.write('\n')
        fp1.close()

        # Linear-fit data file for use with gnuplot
        if opt.method in ['linear-fit',]:
            fp2 = open('fitted.dat','w')
            #print cellsize.values()
            #print np.max(cellsize.values())
            xFit = np.linspace(0,np.max(cellsize.values())*1.1,200)
            for k in range(len(xFit)):
                fp2.write('{0:g}\t'.format(xFit[k]))
                for exitVar in exitProperty[case_list[0]]:
                    yFit = linear_fit_params[exitVar]['intercept'] +\
                           linear_fit_params[exitVar]['slope']*xFit[k]
                    fp2.write('{0:g}\t'.format(yFit))
                fp2.write('\n')
            fp2.close()

        # gnuplot script
        fp2 = open('gnuplot_script.txt','w')
        column = 0
        for exitVar in exitProperty[case_list[0]]:
            column += 1
            fp2.write('# {0:s}\n'.format(exitVar))
            fp2.write('reset\n')
            fp2.write('set terminal postscript enhanced color eps font ')
            fp2.write('"Palatino,20" size 7.7cm,7.8cm\n')
            if exitVar in ['p/q',]:
                fp2.write('set output "./figs/{0:s}.eps"\n'.format('p-on-q'))
            else:
                fp2.write('set output "./figs/{0:s}.eps"\n'.format(exitVar))
            fp2.write('set bmargin at screen 0.1474\n')
            fp2.write('set tmargin at screen 0.85\n')
            fp2.write('set lmargin at screen 0.235\n')
            fp2.write('set rmargin at screen 0.96\n')
            fp2.write('set xlabel "Cell size, 10^{-4} m"\n')
            #fp2.write('set ylabel "{0:s}"\n'.format(exitVar))
            fp2.write('set format y "%g"\n')
            if exitVar in ['total_h','Re_u',]:
                fp2.write('set title "{0:s}, 10^6"\n'.format(exitVar))
            else:
                fp2.write('set title "{0:s}"\n'.format(exitVar))
            fp2.write('#set key tmargin left Left reverse samplen 2 font ",18" maxrows 2 width 5\n')
            if exitVar in ['total_h','Re_u']:
                fp2.write('plot "data.dat" using ($2*10000):(${0:d}/1e6) '.format(column+2))
                fp2.write('notitle with points pt 7 lc 1 ps 1.5,\\\n')
                fp2.write(' "fitted.dat" using ($1*10000):(${0:d}/1e6) '.format(column+1))
                fp2.write('notitle with lines lt 1 lw 1.5\n')
            else:
                fp2.write('plot "data.dat" using ($2*10000):{0:d} '.format(column+2))
                fp2.write('notitle with points pt 7 lc 1 ps 1.5,\\\n')
                fp2.write(' "fitted.dat" using ($1*10000):{0:d} '.format(column+1))
                fp2.write('notitle with lines lt 1 lw 1.5\n')
            fp2.write('\n')
        fp2.close()

        run_command('gnuplot < gnuplot_script.txt')

        # I couldn't get the following to work on mango. There are
        # issues with the version of matplotlib and other missing
        # programs (dvipng ??) and I don't have the time nor
        # patience to sort it out.
        #
        ## Some setup. Alter as desired to suit your needs
        #params = {'text.usetex': True,
        #          'font.family': 'serif',
        #          'font.serif': 'Palatino',
        #          'axes.labelsize': 10,
        #          'text.fontsize': 9,
        #          'legend.fontsize': 9,
        #          'xtick.labelsize': 10,
        #          'ytick.labelsize': 10,
        #          'figure.figsize': [3,3],
        #          'figure.dpi': 300,
        #          'lines.linewidth': 2}
        #plt.rcParams.update(params)
        #
        ## Loop over each freestream property
        #for exitVar in ['p',]: #exitProperty[case_list[0]]:
        #    print "Creating figure for:", exitVar
        #    # Assemble data to be plotted
        #    y = np.array([])
        #    x = np.array([])
        #    for case in case_list:
        #        y = np.append(y, FlowData[case][exitVar])
        #        x = np.append(x, cellsize[case])
        #
        #    if opt.method in ['linear-fit',]:
        #        xFit = np.linspace(0,np.max(x)*1.1,250)
        #        yFit = linear_fit_params[exitVar]['intercept'] +\
        #               linear_fit_params[exitVar]['slope']*xFit
        #
        #    # Create plot
        #    plt.figure(1)
        #    plt.clf()
        #    plt.plot(x*10000,y,'.b',xFit*10000,yFit,'-r')
        #    plt.ylabel(exitVar)
        #    plt.xlabel('Cell size, 10^{-4} m')
        #    figName = "./figs/"+exitVar+".pdf"
        #    plt.tight_layout()
        #    plt.savefig(figName)

    return 0
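# --------------------------------------------------------------------------
# Illustrative sketch only: the 'linear-fit' method above estimates a grid-
# converged "true" value by regressing each exit-flow property against the
# representative cell size h and taking the intercept at h = 0. The helper
# below restates that idea on its own; the name and the synthetic numbers in
# the trailing comment are hypothetical.
def _example_extrapolate_to_zero_h(h_values, property_values):
    """Return (true_value, slope) from a straight-line fit of property vs h."""
    from scipy.stats import linregress
    slope, intercept, r_value, p_value, std_err = linregress(h_values, property_values)
    return intercept, slope
# e.g. h = [1e-4, 2e-4, 4e-4] and p = [1000.5, 1001.0, 1002.0] extrapolate to
# p ~= 1000.0 at h = 0.
# --------------------------------------------------------------------------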
def main():
    """
    Examine the command-line options and then calculate the sensitivities
    and uncertainties of each forebody flow property based on a set of
    nenzfr results generated by "nenzfr_perturbed.py".
    """
    op = optparse.OptionParser(version=VERSION_STRING)
    op.add_option('--run-defaults', dest='runDefaults', action='store_true',
                  default=True,
                  help="calculate sensitivities and "
                       "uncertainties using all the default values.")
    op.add_option('--job', dest='jobName', default='nozzle',
                  help="base name for Eilmer3 files [default: %default]")
    op.add_option('--exitStatsFile', dest='exitStatsFileName',
                  default='nozzle-exit.stats',
                  help="file that holds the averaged nozzle-exit "
                       "data and is to be read in for each perturbation "
                       "case [default: %default]")
    op.add_option('--estcjFile', dest='estcjFile', default='nozzle-estcj.dat',
                  help="file that holds the estcj result and is to be read in "
                       "for each perturbation case. [default: %default]")
    op.add_option('--levels', dest='levels', default=3, choices=['3','5'],
                  help=("specify how many points are to be used in the "
                        "calculation of the gradient. Includes the nominal. "
                        "Options: 3, 5 [default: %default]"))
    # The default values for the following inputs are based on those given in
    # Rainer Kirchhartz' PhD Thesis (Appendix B)
    op.add_option('--Xp1', dest='p1', default=0.0325, type='float',
                  help=("relative uncertainty in shock tube fill pressure. "
                        "[default: %default]"))
    op.add_option('--XT1', dest='T1', default=0.02, type='float',
                  help=("relative uncertainty in shock tube fill temperature. "
                        "[default: %default]"))
    op.add_option('--XVs', dest='Vs', default=0.05, type='float',
                  help=("relative uncertainty in the incident shock speed. "
                        "[default: %default]"))
    op.add_option('--Xpe', dest='pe', default=0.025, type='float',
                  help=("relative uncertainty in the equilibrium pressure "
                        "(after shock reflection). [default: %default]"))
    # The default values for the following inputs are guesstimates :)
    op.add_option('--XTwall', dest='Tw', default=0.04, type='float',
                  help=("relative uncertainty in nozzle wall temperature. "
                        "[default: %default]"))
    op.add_option('--XBLTrans', dest='BLTrans', default=1.00, type='float',
                  help=("relative uncertainty in the boundary layer "
                        "transition location. [default: %default]"))
    op.add_option('--XTurbVisRatio', dest='TurbVisRatio', default=1.00,
                  type='float',
                  help=("relative uncertainty in turbulent-to-laminar "
                        "viscosity ratio at the throat. "
                        "[default: %default]"))
    op.add_option('--XTurbIntensity', dest='TurbInten', default=0.8,
                  type='float',
                  help=("relative uncertainty in turbulence intensity "
                        "at the throat. [default: %default]"))
    op.add_option('--XCoreRadiusFraction', dest="coreRfraction", default=0.05,
                  type='float',
                  help=("relative uncertainty in the core flow radius "
                        "fraction. [default: %default]"))
    op.add_option('--ForebodyAngle', dest='fbAngle', default=6.0, type='float',
                  help=("Angle of forebody relative to nozzle axis in "
                        "degrees [default: %default]"))
    op.add_option('--XForebodyAngle', dest='XfbAngle', default=0.0167,
                  type='float',
                  help=("relative uncertainty in the forebody angle "
                        "[default: %default]"))
    opt, args = op.parse_args()

    # Convert to integer
    opt.levels = int(opt.levels)

    # Check to see if the "forebody_conditions.dat" file exists in
    # the current directory. If it does, we delete it.
    if os.path.exists("forebody_conditions.dat"):
        run_command('rm forebody_conditions.dat')

    # Read the sensitivity_case_summary file to get the perturbed
    # variables and their various values
    perturbedVariables, DictOfCases = read_case_summary()

    # Add the forebody angle to the list of perturbed variables
    # and to the DictOfCases. We also add two cases which represent
    # the perturbation of the forebody angle
    perturbedVariables.append('fbAngle')
    for k in DictOfCases.keys():
        DictOfCases[k].append(opt.fbAngle)
    DictOfCases['case091'] = copy.copy(DictOfCases['case000'])
    DictOfCases['case091'][-1] = opt.fbAngle*1.01
    DictOfCases['case092'] = copy.copy(DictOfCases['case000'])
    DictOfCases['case092'][-1] = opt.fbAngle*0.99

    # Create a dictionary of the relative uncertainties of each
    # variable that may have been perturbed
    inputUncertainties = {'p1':opt.p1, 'T1':opt.T1, 'Vs':opt.Vs, 'pe':opt.pe,
                          'Tw':opt.Tw, 'TurbVisRatio':opt.TurbVisRatio,
                          'TurbInten':opt.TurbInten, 'BLTrans':opt.BLTrans,
                          'CoreRadiusFraction':opt.coreRfraction,
                          'fbAngle':opt.XfbAngle}

    # Define the name of the nominal case and load the exit plane data
    nominal = 'case000'
    nominalData, exitVar = read_nenzfr_outfile('./'+nominal+'/'+\
                                               opt.exitStatsFileName)

    # Define the file name for gas model
    gmodelFile = read_gmodelFile_from_config('./'+nominal+'/'+opt.jobName)
    if not os.path.exists(gmodelFile):
        run_command('cp ./'+nominal+'/'+gmodelFile+' ./')

    # Calculate forebody flow properties for nominal condition
    fbNomData, fbVar = calculate_forebody(nominalData, \
                                          DictOfCases[nominal][-1], gmodelFile)
    # Write out the data to summary file
    write_forebody_summary(fbNomData, fbVar, nominal)
    nominalValues = get_values(fbNomData, fbVar)

    # Loop through each of the perturbed variables
    sensitivity = {}
    sensitivity_abs = {}
    uncertainty = {}
    for k in range(len(perturbedVariables)):
        var = perturbedVariables[k]

        if var == 'CoreRadiusFraction':
            perturb_CoreRadiusFraction(var, perturbedVariables,\
                                       DictOfCases, opt.levels)

        # Define the name of the relevant perturbed cases and load the
        # associated data
        if var not in ['fbAngle']:
            high = 'case'+"{0:02}".format(k)+'1'
            highData, dontNeed = read_nenzfr_outfile('./'+high+'/'+\
                                                     opt.exitStatsFileName)
            fbHighData, dontNeed = calculate_forebody(highData, \
                                       DictOfCases[high][-1], gmodelFile)
            # Write out the data to summary file
            write_forebody_summary(fbHighData, fbVar, high)

            low = 'case'+"{0:02}".format(k)+'2'
            lowData, dontNeed = read_nenzfr_outfile('./'+low+'/'+\
                                                    opt.exitStatsFileName)
            fbLowData, dontNeed = calculate_forebody(lowData, \
                                      DictOfCases[low][-1], gmodelFile)
            # Write out the data to summary file
            write_forebody_summary(fbLowData, fbVar, low)
        else:
            # For perturbation of fbAngle we just use the nominalData and
            # calculate forebody properties with a different angle
            high = 'case'+"{0:02}".format(k)+'1'
            fbHighData, dontNeed = calculate_forebody(nominalData, \
                                       DictOfCases[high][-1], gmodelFile)
            # Write out the data to summary file
            write_forebody_summary(fbHighData, fbVar, high)

            low = 'case'+"{0:02}".format(k)+'2'
            fbLowData, dontNeed = calculate_forebody(nominalData, \
                                      DictOfCases[low][-1], gmodelFile)
            # Write out the data to summary file
            write_forebody_summary(fbLowData, fbVar, low)

        # Values of the freestream properties at the perturbed conditions
        highValues = get_values(fbHighData, fbVar)
        lowValues = get_values(fbLowData, fbVar)
        # Values of the perturbed input values
        highX = DictOfCases[high][perturbedVariables.index(var)]
        lowX = DictOfCases[low][perturbedVariables.index(var)]
        nominalX = DictOfCases[nominal][perturbedVariables.index(var)]

        if opt.levels == 3:
            # As the perturbations may not be centered on the nominal
            # condition we calculate the gradient by taking a weighted
            # average of the forward and backward derivatives. The
            # weightings are such that the truncation error associated
            # with this gradient estimate is O(3) or higher (i.e. the
            # weightings are such that the second-order terms in the Taylor
            # series expansion cancel. Thanks to D.Petty for this theory.)
            highWeighting = (nominalX - lowX)/(highX - lowX)
            lowWeighting = (highX - nominalX)/(highX - lowX)

            sensitivity_abs[var] = ( highWeighting*(array(highValues)-\
                                                    array(nominalValues))/\
                                                   (highX - nominalX) + \
                                     lowWeighting*(array(nominalValues)-\
                                                   array(lowValues))/\
                                                  (nominalX - lowX) )
            sensitivity[var] = sensitivity_abs[var]*nominalX/array(nominalValues)
        else:
            # For 5 levels per variable we have additional cases
            # that need to be loaded. Again we do not assume that
            # the levels are equally spaced around the nominal.
            # The weightings are such that the truncation error
            # associated with this estimate is O(4) or higher.
            if var not in ['fbAngle']:
                tooHigh = 'case'+"{0:02}".format(k)+'3'
                tooHighData, dontNeed = \
                    read_nenzfr_outfile('./'+tooHigh+'/'+opt.exitStatsFileName)
                fbTooHighData, dontNeed = calculate_forebody(tooHighData, \
                                              DictOfCases[tooHigh][-1], gmodelFile)
                # Write out the data to summary file
                write_forebody_summary(fbTooHighData, fbVar, tooHigh)

                tooLow = 'case'+"{0:02}".format(k)+'4'
                tooLowData, dontNeed = \
                    read_nenzfr_outfile('./'+tooLow+'/'+opt.exitStatsFileName)
                fbTooLowData, dontNeed = calculate_forebody(tooLowData, \
                                             DictOfCases[tooLow][-1], gmodelFile)
                # Write out the data to summary file
                write_forebody_summary(fbTooLowData, fbVar, tooLow)
            else:
                # For perturbation of fbAngle we just use the nominalData and
                # calculate forebody properties with a different angle
                tooHigh = 'case'+"{0:02}".format(k)+'1'
                fbTooHighData, dontNeed = calculate_forebody(nominalData, \
                                              DictOfCases[tooHigh][-1], gmodelFile)
                # Write out the data to summary file
                write_forebody_summary(fbTooHighData, fbVar, tooHigh)

                tooLow = 'case'+"{0:02}".format(k)+'2'
                fbTooLowData, dontNeed = calculate_forebody(nominalData, \
                                             DictOfCases[tooLow][-1], gmodelFile)
                # Write out the data to summary file
                write_forebody_summary(fbTooLowData, fbVar, tooLow)

            # Values of the freestream properties at the perturbed conditions
            tooHighValues = get_values(fbTooHighData, fbVar)
            tooLowValues = get_values(fbTooLowData, fbVar)
            # Values of the perturbed input values
            tooHighX = DictOfCases[tooHigh][perturbedVariables.index(var)]
            tooLowX = DictOfCases[tooLow][perturbedVariables.index(var)]

            tooHighDeltaX = tooHighX - nominalX
            highDeltaX = highX - nominalX
            tooLowDeltaX = tooLowX - nominalX
            lowDeltaX = lowX - nominalX

            weighting = (tooHighX - tooLowX)/(highX - lowX)
            denom = 1/tooHighDeltaX - 1/tooLowDeltaX -\
                    (1/highDeltaX - 1/lowDeltaX)*weighting
            numer = array(tooHighValues)/tooHighDeltaX**2 -\
                    array(tooLowValues)/tooLowDeltaX**2 -\
                    ( array(highValues)/highDeltaX**2 -\
                      array(lowValues)/lowDeltaX**2 )*weighting -\
                    array(nominalValues)*\
                    ( 1/tooHighDeltaX**2 - 1/tooLowDeltaX**2 - \
                      ( 1/highDeltaX**2 - 1/lowDeltaX**2 )*weighting )
            sensitivity[var] = numer/denom*nominalX/array(nominalValues)
            sensitivity_abs[var] = numer/denom

        # Now calculate the uncertainty in each exit flow variable
        # due to the uncertainty in the current (perturbed)
        # input variable
        uncertainty[var] = sensitivity[var]*inputUncertainties[var]

    # Write out a file of the sensitivities
    write_sensitivity_summary(sensitivity, perturbedVariables, fbVar, '', 'relative')
    write_sensitivity_summary(sensitivity_abs, perturbedVariables, fbVar, '', 'absolute')

    # Write out a file of the uncertainties
    write_uncertainty_summary(uncertainty, perturbedVariables, fbVar,\
                              inputUncertainties)

    #--------------------------------------------------------------------------------
    # Each of the freestream properties has a total bias error associated with
    # the convergence of the grid, the spatial variation of the exit flow over the
    # core region and the use of a response surface.
    #
    # In order to be able to propagate these bias errors through to the forebody
    # properties we need to calculate the sensitivities of each forebody property to
    # each freestream property.
    #
    sensitivity_fb_to_fs = {}
    sensitivity_fb_to_fs_abs = {}
    varList = []
    #print exitVar
    #print nominalData
    exclude_list = ['rho','mu','k[0]','M_local','pitot_p','total_p','total_h','dt_chem',
                    'e[0]','a','omega','tke','k_t','mu_t']
    # Loop over each freestream variable
    for k in range(len(exitVar)):
        var = exitVar[k]
        if var not in exclude_list:
            varList.append(var)
            # Values of the perturbed freestream values
            highX = nominalData[var]*1.02
            lowX = nominalData[var]*0.98
            nominalX = nominalData[var]
            # Create a copy of the nominal data and update the relevant variable with the
            # perturbed value
            highData = copy.copy(nominalData); highData[var] = highX
            lowData = copy.copy(nominalData); lowData[var] = lowX
            #print "var=",var,"; highX=",highX,"; nominalX=",nominalX,"; lowX=",lowX

            # Now calculate the forebody conditions
            theta = DictOfCases[nominal][-1]  # forebody angle
            fbHighData, dontNeed = calculate_forebody(highData, theta, gmodelFile)
            fbLowData, dontNeed = calculate_forebody(lowData, theta, gmodelFile)
            #print fbHighData
            highValues = get_values(fbHighData, fbVar)
            lowValues = get_values(fbLowData, fbVar)
            #print nominalValues
            #print fbVar
            #assert -1 > 0

            # Now calculate the sensitivity
            if nominalX != 0.0:
                highWeighting = (nominalX - lowX)/(highX - lowX)
                lowWeighting = (highX - nominalX)/(highX - lowX)

                sensitivity_fb_to_fs_abs[var] = ( highWeighting*(array(highValues)-\
                                                                 array(nominalValues))/\
                                                                (highX - nominalX) + \
                                                  lowWeighting*(array(nominalValues)-\
                                                                array(lowValues))/\
                                                               (nominalX - lowX) )
                sensitivity_fb_to_fs[var] = sensitivity_fb_to_fs_abs[var]*nominalX/array(nominalValues)
            else:
                sensitivity_fb_to_fs_abs[var] = array(nominalValues)*float('nan')
                sensitivity_fb_to_fs[var] = array(nominalValues)*float('nan')

    #print "perturbed variables, varList=",varList
    #print
    #print "forebody variables, fbVar=",fbVar
    #print
    #print sensitivity_fb_to_fs
    #print

    # Write out a summary file
    write_sensitivity_summary(sensitivity_fb_to_fs, varList, fbVar,\
                              'fb_to_fs_','relative')
    write_sensitivity_summary(sensitivity_fb_to_fs_abs, varList, fbVar,\
                              'fb_to_fs_','absolute')
    #print sensitivity_fb_to_fs
    #print varList

    return 0
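# --------------------------------------------------------------------------
# Illustrative sketch only: the 3-level sensitivity estimate used above is a
# weighted average of the forward and backward differences, with weights
# chosen so the second-order Taylor terms cancel when the perturbations are
# not centred on the nominal point. The stand-alone helper below restates
# that formula; the function name and the test values are hypothetical.
def _example_weighted_gradient(xLow, xNom, xHigh, fLow, fNom, fHigh):
    """Return df/dx at xNom from three (possibly non-centred) samples."""
    highWeighting = (xNom - xLow)/(xHigh - xLow)
    lowWeighting = (xHigh - xNom)/(xHigh - xLow)
    return (highWeighting*(fHigh - fNom)/(xHigh - xNom) +
            lowWeighting*(fNom - fLow)/(xNom - xLow))
# For f(x) = x**2 sampled at x = 0.9, 1.0 and 1.2 this returns 2.0 exactly,
# whereas a simple one-sided difference would not.
# --------------------------------------------------------------------------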