import math

import f90nml
from scipy.constants import convert_temperature  # assumed source of convert_temperature

# NOTE (assumed): gamma and gas_constant were module-level constants in the
# original source; standard values for air are used here.
gamma = 1.4            # ratio of specific heats for air
gas_constant = 287.05  # specific gas constant of air, J/(kg-K)


def readOverflowInputFile(basename, radius):
    parser = f90nml.Parser()
    overflow_input = parser.read(basename)

    alpha = overflow_input[u'floinp'][u'alpha']    # DEGREES
    beta = overflow_input[u'floinp'][u'beta']      # DEGREES
    fsmach = overflow_input[u'floinp'][u'fsmach']
    rey = overflow_input[u'floinp'][u'rey']        # RE/(GRID-UNIT)

    if u'tinf' in overflow_input[u'floinp']:
        tinf_tmp = overflow_input[u'floinp'][u'tinf']  # RANKINE
    else:
        tinf_tmp = 518.7

    if u'refmach' in overflow_input[u'floinp']:
        refmach = overflow_input[u'floinp'][u'refmach']
    else:
        refmach = fsmach

    # CALCULATE OMEGA AND VINF BASED ON NON-DIMENSIONAL INPUTS
    tinf = convert_temperature(tinf_tmp, u'Rankine', u'Kelvin')
    a = math.sqrt(gamma * gas_constant * tinf)  # M/S
    omega = refmach * a / radius                # RAD/S
    vinf = fsmach * a                           # M/S
    vref = refmach * a
    nu = (1E-03 / rey) * (refmach * a)

    # CALCULATE THE DENSITY USING SUTHERLAND'S FORMULA FOR VISCOSITY
    mu0 = 1.716E-05  # kg/(m-s); the original comment said "lb-s/ft^2",
                     # which is wrong for this SI reference value
    T0 = 273.15      # Kelvin (not Rankine; tinf was converted above)
    mu = mu0 * (tinf / T0)**1.5 * ((T0 + 110.4) / (tinf + 110.4))
    rho = mu / nu
    print(u"rho = " + str(rho))

    return vinf, vref, alpha, beta, omega, a, tinf, rho
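A minimal sketch of how readOverflowInputFile might be driven. The file name over.namelist and every value below are hypothetical; only the &FLOINP group keys (fsmach, alpha, beta, rey, tinf) come from the function above.

    # Hypothetical OVERFLOW input file; keys mirror those read above.
    with open('over.namelist', 'w') as f:
        f.write(
            "&floinp\n"
            "  fsmach = 0.2, alpha = 4.0, beta = 0.0,\n"
            "  rey = 1.1e4, tinf = 518.7,\n"
            "/\n"
        )

    vinf, vref, alpha, beta, omega, a, tinf, rho = \
        readOverflowInputFile('over.namelist', radius=1.5)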
def process_metadata(self, lines):
    def preprocess_edge_cases(lines, inverse=False):
        replacements = {"W/m": "Wperm", "^": "superscript"}
        # apply_string_substitutions is a helper from the surrounding project
        return apply_string_substitutions(lines, replacements, inverse=inverse)

    def postprocess_edge_cases(value):
        return preprocess_edge_cases(value, inverse=True)

    # TODO: replace with f90nml.reads when released (>1.0.2)
    parser = f90nml.Parser()
    lines = preprocess_edge_cases(lines)
    nml = parser._readstream(lines, {})
    metadata = {}
    for k in nml["THISFILE_SPECIFICATIONS"]:
        metadata_key = k.split("_")[1]
        try:
            # This type coercion is needed because f90nml reads values like
            # "10superscript22 J" into a three-part list,
            # [10, 'superscript22', 'J'], where the first part is an int
            value = "".join(
                [str(v) for v in nml["THISFILE_SPECIFICATIONS"][k]])
            metadata[metadata_key] = postprocess_edge_cases(value).strip()
        except TypeError:
            metadata[metadata_key] = nml["THISFILE_SPECIFICATIONS"][k]
    return metadata
def test_default_index(self):
    parser = f90nml.Parser()
    parser.default_start_index = 1
    test_nml = parser.read('default_index.nml')
    self.assertEqual(self.default_one_index_nml, test_nml)

    parser.default_start_index = 0
    test_nml = parser.read('default_index.nml')
    self.assertEqual(self.default_zero_index_nml, test_nml)
def process_metadata(self, lines):
    # TODO: replace with f90nml.reads when released (>1.0.2)
    parser = f90nml.Parser()
    nml = parser._readstream(lines, {})
    metadata = {
        k.split("_")[1]: nml["THISFILE_SPECIFICATIONS"][k]
        for k in nml["THISFILE_SPECIFICATIONS"]
    }
    return metadata
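Per the TODO above, the private _readstream call could plausibly be replaced once f90nml.reads is available (releases after 1.0.2). A one-line sketch, assuming lines is a list of strings:

    nml = f90nml.reads("\n".join(lines))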
def test_parser_property_invalid(self):
    parser = f90nml.Parser()
    self.assertRaises(TypeError, setattr, parser, 'comment_tokens', 123)
    self.assertRaises(TypeError, setattr, parser,
                      'default_start_index', 'abc')
    self.assertRaises(TypeError, setattr, parser, 'sparse_arrays', 'abc')
    self.assertRaises(TypeError, setattr, parser,
                      'global_start_index', 'abc')
    self.assertRaises(TypeError, setattr, parser, 'row_major', 'abc')
    self.assertRaises(TypeError, setattr, parser, 'strict_logical', 'abc')
import f90nml


def read_namelist(fname):
    """
    MITgcm has some additional requirements when reading namelists, so
    isolate the logic to this routine.
    """
    # MITgcm strips shell-style (#) comments from its namelists
    nml_parser = f90nml.Parser()
    nml_parser.comment_tokens += '#'

    return nml_parser.read(fname)
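A small sketch of what extending comment_tokens buys: with '#' added, shell-style comment lines are ignored when parsing. The file name data.test and its contents are hypothetical.

    with open('data.test', 'w') as f:
        f.write(
            "# shell-style comment, tolerated by MITgcm namelists\n"
            "&parm03\n"
            "  deltat = 900.0\n"
            "/\n"
        )

    nml = read_namelist('data.test')
    print(nml['parm03']['deltat'])  # -> 900.0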
def perform(self, args):
    namelist = args.namelist["_"]

    parser = f90nml.Parser()

    if os.path.exists(namelist):
        nml = parser.read(namelist)
    else:
        nml = parser.reads(namelist)

    if args.outfile:
        f90nml.write(nml, args.outfile["_"])

    self.add_forward(data=nml.todict(complex_tuple=True))
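A hedged illustration of the dispatch used above: Parser.read takes a path while Parser.reads takes namelist source as a string, and Namelist.todict converts the result to a plain dict (complex_tuple=True renders Fortran complex values as tuples). The group name and values here are made up.

    import f90nml

    parser = f90nml.Parser()
    nml = parser.reads("&config\n  n = 3\n  z = (1.0, -2.0)\n/\n")
    d = nml.todict(complex_tuple=True)
    print(d["config"]["n"])  # -> 3
    print(d["config"]["z"])  # -> (1.0, -2.0)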
import glob

import f90nml
import natsort


def get_old_rdisk(num_rotors):
    parser = f90nml.Parser()

    # Create list of rotor input files. Read old collectives from these files.
    rotor_files = natsort.natsorted(glob.glob("rotor.r*.inp"))
    rotor_names = []
    for file in rotor_files:
        junk = file.replace('rotor.', '')
        junk = junk.replace('.inp', '')
        rotor_names.append(junk)

    coll_old = []
    # Read all the old rdisk files to get the old collectives so I can
    # update to the new collectives
    for i in range(num_rotors):
        rdisk_input = parser.read(rotor_files[i])
        coll_old.append(rdisk_input['rdisk_trim']['A0'])

    return coll_old, rotor_files, rotor_names
def test_logical_repr(self):
    parser = f90nml.Parser()
    parser.strict_logical = False
    test_nml = parser.read('logical.nml')
    test_nml.true_repr = 'T'
    test_nml.false_repr = 'F'

    self.assertEqual(test_nml.false_repr, test_nml.logical_repr[0])
    self.assertEqual(test_nml.true_repr, test_nml.logical_repr[1])
    self.assert_write(test_nml, 'logical_repr.nml')

    test_nml.logical_repr = 'F', 'T'
    self.assert_write(test_nml, 'logical_repr.nml')

    self.assertRaises(TypeError, setattr, test_nml, 'true_repr', 123)
    self.assertRaises(TypeError, setattr, test_nml, 'false_repr', 123)
    self.assertRaises(ValueError, setattr, test_nml, 'true_repr', 'xyz')
    self.assertRaises(ValueError, setattr, test_nml, 'false_repr', 'xyz')
    self.assertRaises(TypeError, setattr, test_nml, 'logical_repr', 'xyz')
    self.assertRaises(ValueError, setattr, test_nml, 'logical_repr', [])
def __init__(self, src, dest=None, copy_method="copy"):
    """
    Initializes a ComponentNamelist

    See :class:`pyesm.helpers.ComponentFile` for arguments.

    Attributes
    ----------
    nml : f90nml.namelist.Namelist
        A Fortran namelist representation in Python.
        See :class:`pyesm.helpers.ComponentFile` for other attributes.
    """
    super(ComponentNamelist, self).__init__(src, dest, copy_method)

    # NOTE: See documentation for f90nml here:
    # https://f90nml.readthedocs.io/en/latest/
    parser = f90nml.Parser()
    self.nml = parser.read(self.src)
import json
import os

import f90nml
import yaml


def file_2_nob(path):
    """
    Load a nob from a serialization file

    :param path: path to a file
    """
    ext = os.path.splitext(path)[-1]
    if ext in [".yaml", ".yml"]:
        with open(path, "r") as fin:
            nob = yaml.load(fin, Loader=yaml.SafeLoader)
    elif ext in [".json"]:
        with open(path, "r") as fin:
            nob = json.load(fin)
    elif ext in [".nml"]:
        nmlp = f90nml.Parser()
        nmlp.read(path)
        nob = nmlp.tokens
        raise NotImplementedError("Namelist not fully implemented")
    else:
        raise RuntimeError("Format not supported")
    return nob
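The .nml branch above parses the file and then raises on purpose. If a plain nested dict were acceptable, one hypothetical completion (an assumption, not the author's implementation) could use Namelist.todict(), which the f90nml API provides:

    elif ext in [".nml"]:
        nob = f90nml.Parser().read(path).todict()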
def test_rowmaj_multidim(self):
    parser = f90nml.Parser()
    parser.row_major = True
    test_nml = parser.read('multidim.nml')
    self.assertEqual(self.md_rowmaj_nml, test_nml)
def test_comment_alt(self):
    parser = f90nml.Parser()
    parser.comment_tokens = '#'
    test_nml = parser.read('comment_alt.nml')
    self.assertEqual(self.comment_alt_nml, test_nml)
def setup(self):
    # TODO: Find a better place to generate this list
    files = [f for f in os.listdir(self.control_path)
             if f.startswith('data')]
    files.append('eedata')

    # Rudimentary check that matching files are namelists. Can only check
    # if a namelist is empty. May exclude false positives, but these are
    # devoid of useful information in that case
    for fname in files:
        nml_parser = f90nml.Parser()
        nml_parser.comment_tokens += '#'

        data_nml = nml_parser.read(fname)
        if len(data_nml) > 0:
            self.config_files.append(fname)
        else:
            print("Excluding {0} from configuration files: assumed "
                  "to be not a namelist file (or empty)".format(fname))

    # Generic model setup
    super(Mitgcm, self).setup()

    # Link restart files to work directory
    if self.prior_restart_path and not self.expt.repeat_run:
        # Determine total number of timesteps since initialisation
        core_restarts = [f for f in os.listdir(self.prior_restart_path)
                         if f.startswith('pickup.')]
        try:
            # NOTE: Use the most recent, in case of multiple restarts
            n_iter0 = max([int(f.split('.')[1]) for f in core_restarts])
        except ValueError:
            sys.exit("payu: error: no restart files found.")
    else:
        n_iter0 = 0

    # Update configuration file 'data'
    data_path = os.path.join(self.work_path, 'data')

    # MITgcm strips shell-style (#) comments from its namelists
    nml_parser = f90nml.Parser()
    nml_parser.comment_tokens += '#'

    data_nml = nml_parser.read(data_path)

    # Timesteps are either global (deltat) or divided into momentum
    # (deltatmom) and tracer (deltat). If deltat is missing, then we just
    # try deltatmom. But I am not sure how to best handle this case.
    # TODO: Sort this out with an MITgcm user
    try:
        dt = data_nml['parm03']['deltat']
    except KeyError:
        dt = data_nml['parm03']['deltatmom']

    # Runtime seems to be set either by timesteps (ntimesteps) or physical
    # time (startTime and endTime).
    # TODO: Sort this out with an MITgcm user
    try:
        n_timesteps = data_nml['parm03']['ntimesteps']
        pchkpt_freq = dt * n_timesteps
    except KeyError:
        t_start = data_nml['parm03']['starttime']
        t_end = data_nml['parm03']['endtime']
        pchkpt_freq = t_end - t_start

    # NOTE: Consider permitting pchkpt_freq < dt * n_timesteps
    # NOTE: May re-enable chkpt_freq in the future
    data_nml['parm03']['niter0'] = n_iter0
    data_nml['parm03']['pchkptfreq'] = pchkpt_freq
    data_nml['parm03']['chkptfreq'] = 0

    data_nml.write(data_path, force=True)

    # Patch or create data.mnc
    mnc_header = os.path.join(self.work_path, 'mnc_')

    data_mnc_path = os.path.join(self.work_path, 'data.mnc')
    try:
        data_mnc_nml = nml_parser.read(data_mnc_path)
        data_mnc_nml['mnc_01']['mnc_outdir_str'] = mnc_header
        data_mnc_nml.write(data_mnc_path, force=True)
    except IOError as exc:
        if exc.errno == errno.ENOENT:
            mnc_01_grp = {
                'mnc_use_outdir': True,
                'mnc_use_name_ni0': True,
                'mnc_outdir_str': mnc_header,
                'mnc_outdir_date': True,
                'monitor_mnc': True
            }
            data_mnc_nml = {'mnc_01': mnc_01_grp}

            # f90nml.Parser has no write method; use the module-level writer
            f90nml.write(data_mnc_nml, data_mnc_path)
        else:
            raise
def test_sparse_arrays(self):
    parser = f90nml.Parser()
    parser.sparse_arrays = True
    test_nml = parser.read('sparse_array.nml')
    self.assertEqual(self.sparse_array_nml, test_nml)
def test_1(self):
    """
    Main unit test case for `fastnml`.
    """
    print("")
    print("---------------------")
    print(" run the save tests:")
    print("---------------------")
    print("")

    outfilename = "sample.nml"
    d = {
        "globvars": {
            "a": {
                "TF": True,
                "REAL": 2.0,
                "int": 146,
                "array1": [1, 2],
                "array2": [1, 2],
                "array3": [1, 2],
                "str": "string 'with quotes'",
                "list": ["a", "b", None, "d", "e"],
            }
        },
        "morevars": [{"name": 1}, {"name": 2}],
    }
    save_namelist(d, outfilename)
    with open(outfilename, "r") as f:
        print(f.read())

    print("")
    print("---------------------")
    print(" run the read tests:")
    print("---------------------")
    print("")

    filenames = [
        "test.nml",    # 112 namelists, all strings [8 sec]
        "test4.nml",   # 112 namelists, all strings, long keys w/ (2) [42 sec]
        "test4b.nml",  # 112 namelists, all strings, long keys no array [9 sec]
        "test4c.nml",  # 112 namelists, all strings, long keys w/ % [12 sec]
    ]
    filenames = [os.path.join("tests", f) for f in filenames]

    n_threads_to_test = 4  # for threading cases

    tests = [
        (read_from_file_f90nml, 0),
        (read_chunks_f90nml, n_threads_to_test),
        (read_chunks_simple, n_threads_to_test),
    ]

    parser = f90nml.Parser()
    parser.global_start_index = 1

    repeats = 1

    run_read_tests(filenames, tests, parser, repeats)
def test_global_index(self):
    parser = f90nml.Parser()
    parser.global_start_index = 1
    test_nml = parser.read('global_index.nml')
    self.assertEqual(self.global_index_nml, test_nml)
import os

import f90nml


def writeRotorInp(num_rotors, trim_meth, dr1_coll, dr2_coll, dr3_coll,
                  dr4_coll, dr5_coll, dr6_coll, fomoco_target, rdisk_avg,
                  r5_cq, r6_cq, nondim_extra, rtype):
    #############################################################
    # NOW THE TRIMMING_WTAIL.PY SCRIPT IS AMENDED TO THIS CODE  #
    #############################################################
    parser = f90nml.Parser()

    if rtype == 'bem':
        # Get rdisk old collectives
        coll_old, rotor_files, rotor_names = get_old_rdisk(num_rotors)
    elif rtype == 'blades':
        coll_old, rotor_names = get_old_blades(num_rotors)

    # Make a list of the delta collectives for full & half aircraft
    if num_rotors == 3:
        coll_delta = list([dr1_coll, dr2_coll, dr5_coll])

        # Open the output file so that I can write to the file.
        output = open("trim_output.log", "a")

        if trim_meth == 1 or trim_meth == 2:
            print("fomoco_avg(0) [Total Drag] = "
                  + str(fomoco_target[0] + nondim_extra), file=output)
            print("fomoco_avg(4) [Total PM] = " + str(fomoco_target[4]),
                  file=output)
            print("r5_avg [Torque] = " + str(r5_cq), file=output)
            print("\n", file=output)
        elif trim_meth == 3:
            print("fomoco_target(0) [Total A/C Lift] = "
                  + str(fomoco_target[0]), file=output)
            print("fomoco_target(4) [Total A/C PM] = "
                  + str(fomoco_target[4]), file=output)
            print("\n", file=output)

        print('Old collectives', file=output)
        for i in range(num_rotors):
            print(str(rotor_names[i]) + ": " + str(coll_old[i]), file=output)
        print("\n", file=output)

        print('Delta collectives', file=output)
        for i in range(num_rotors):
            print(str(rotor_names[i]) + ": " + str(coll_delta[i]),
                  file=output)
        print("\n", file=output)

        coll_new = []
        for i in range(num_rotors):
            coll_new.append(coll_old[i] + coll_delta[i])

        print('New collectives', file=output)
        for i in range(num_rotors):
            print(str(rotor_names[i]) + ": " + str(coll_new[i]), file=output)
        print("\n", file=output)

        if rtype == 'bem':
            # Write new rotor files using f90nml
            os.system('rm *.inp2')
            for i in range(len(rotor_files)):
                rdisk_input = parser.read(rotor_files[i])
                rdisk_input['rdisk_trim']['a0'] = coll_new[i]
                name = rotor_files[i].replace('.inp', '.inp2')
                rdisk_input.write(name)
                os.system('cp ' + name + ' ' + str(rotor_files[i]))

            # Copy temporary files for the next iteration
            os.system('cp rotor.r1.inp2 rotor.r1.inp')
            os.system('cp rotor.r2.inp2 rotor.r2.inp')
            os.system('cp rotor.r5.inp2 rotor.r5.inp')
        elif rtype == 'blades':
            os.system("sed 's/coll/{p}/' motion.ref.txt > motion.r1.tmp"
                      .format(p=coll_new[0]))
            os.system("sed 's/coll/{p}/' motion.ref.txt > motion.r2.tmp"
                      .format(p=coll_new[1]))
            os.system("sed 's/coll/{p}/' motion.ref.txt > motion.r5.tmp"
                      .format(p=coll_new[2]))
            os.system("mv motion.r1.tmp motion.r1.txt")
            os.system("mv motion.r2.tmp motion.r2.txt")
            os.system("mv motion.r5.tmp motion.r5.txt")

        output.close()

    ###########################################################################
    ################# Full aircraft functionality here ########################
    ###########################################################################
    elif num_rotors == 6:
        coll_delta = list([dr1_coll, dr2_coll, dr3_coll, dr4_coll,
                           dr5_coll, dr6_coll])

        output = open("trim_output.log", "a")

        r5_avg = rdisk_avg[4, 8]
        r6_avg = rdisk_avg[5, 8]

        if trim_meth == 1 or trim_meth == 2:
            print("CD_TARGET [Total Drag] = "
                  + str(fomoco_target[0] + nondim_extra), file=output)
            print("CM_TARGET [Total PM] = " + str(fomoco_target[4]),
                  file=output)
            print("r5_avg [Torque] = " + str(r5_cq), file=output)
            print("r6_avg [Torque] = " + str(r6_cq), file=output)
            print("\n", file=output)
        elif trim_meth == 3:
            print("fomoco_target(0) [Total A/C Lift] = "
                  + str(fomoco_target[0]), file=output)
            print("fomoco_target(4) [Total A/C PM] = "
                  + str(fomoco_target[4]), file=output)
            print("\n", file=output)

        print('Old collectives', file=output)
        for i in range(num_rotors):
            print(str(rotor_names[i]) + ": " + str(coll_old[i]), file=output)
        print("\n", file=output)

        print('Delta collectives', file=output)
        for i in range(num_rotors):
            print(str(rotor_names[i]) + ": " + str(coll_delta[i]),
                  file=output)
        print("\n", file=output)

        coll_new = []
        for i in range(num_rotors):
            coll_new.append(coll_old[i] + coll_delta[i])

        print('New collectives', file=output)
        for i in range(num_rotors):
            print(str(rotor_names[i]) + ": " + str(coll_new[i]), file=output)

        # Write new rotor files using f90nml
        os.system('rm *.inp2')
        for i in range(len(rotor_files)):
            rdisk_input = parser.read(rotor_files[i])
            rdisk_input['rdisk_trim']['a0'] = coll_new[i]
            name = rotor_files[i].replace('.inp', '.inp2')
            rdisk_input.write(name)
            os.system('cp ' + name + ' ' + str(rotor_files[i]))

        # COPY TEMPORARY FILES FOR NEXT ITERATION
        os.system('cp rotor.r1.inp2 rotor.r1.inp')
        os.system('cp rotor.r2.inp2 rotor.r2.inp')
        os.system('cp rotor.r3.inp2 rotor.r3.inp')
        os.system('cp rotor.r4.inp2 rotor.r4.inp')
        os.system('cp rotor.r5.inp2 rotor.r5.inp')
        os.system('cp rotor.r6.inp2 rotor.r6.inp')

        output.close()

    return coll_old, coll_new, coll_delta
def getparm(path_to_namelist, usef90nml=True, flatten=True):
    """
    Read in Namelist file to a dictionary as strings or floats
    """
    if usef90nml:
        import f90nml
        parser = f90nml.Parser()
        parser.comment_tokens += '#'
        mydata = parser.read(path_to_namelist)

        # Optionally flatten by removing namelist section names
        # (parm01, parm02, parm03, ...)
        if flatten:
            myparms = {}
            for k in mydata.keys():
                myparms = dict(myparms, **mydata[k])
        else:
            myparms = mydata.todict()
    else:
        # Don't use f90nml (it works with data.diagnostics if
        # fields(1:15,1) etc. type statements are used)
        myparms = {}
        it = 0
        file = open(path_to_namelist, 'r').readlines()
        while it < len(file) - 1:
            key = []
            key1 = []
            value = []
            line, _, comment = file[it].partition('#')
            if line.strip():  # non-blank line
                line, _, comment = line.partition('&')
                if line.strip():  # non-blank line
                    key1, _, value = line.partition('=')
                    key, _, loc = key1.strip(' .,\)\#').partition('(')
                    if key == 'fields':  # data.diagnostics specific
                        # Do some looking ahead to see if there are
                        # variables on the next line
                        while file[it + 1].find('=') == -1:
                            it += 1  # increment the counter
                            line, _, comment = file[it].partition('#')
                            if line.strip():  # non-blank line
                                line, _, comment = line.partition('&')
                                if line.strip():  # non-blank line
                                    value = (value.strip(' \"\n')
                                             + line.strip(' \t'))
                    try:
                        # np.float was removed from modern NumPy; the
                        # builtin float is equivalent here
                        if key.strip().lower() in myparms.keys():
                            # Append value to a key in myparms
                            myparms[key.strip().lower()].append(
                                float(value.strip(' ,.\'\"\n')))
                        else:
                            # Cannot append to a key that doesn't exist,
                            # so create it as an array
                            myparms[key.strip().lower()] = [
                                float(value.strip(' ,.\'\"\n'))]
                    except ValueError:
                        if key.strip().lower() == 'fields':
                            if key.strip().lower() in myparms.keys():
                                # Append value to a key in myparms
                                myparms[key.strip().lower()].append(
                                    value.strip(' ,.\'\"\n').strip('\'')
                                    .strip('\ ').replace(' ', '')
                                    .split('\',\''))
                            else:
                                # Cannot append to a key that doesn't
                                # exist, so create it as an array
                                myparms[key.strip().lower()] = [
                                    value.strip(' ,.\'\"\n').strip('\'')
                                    .strip('\ ').replace(' ', '')
                                    .split('\',\'')]
                        else:
                            if key.strip().lower() in myparms.keys():
                                # Append value to a key in myparms
                                myparms[key.strip().lower()].append(
                                    value.strip().strip(' ,.\'\"\n'))
                            else:
                                # Cannot append to a key that doesn't
                                # exist, so create it as an array
                                myparms[key.strip().lower()] = [
                                    value.strip().strip(' ,.\'\"\n')]
            # Increment the counter
            it += 1

    return myparms
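A brief usage sketch of the flatten option, assuming a hypothetical MITgcm 'data' file containing a parm03 group: with flatten=True the group names disappear and parameters are looked up directly.

    parms = getparm('data')           # hypothetical path to an MITgcm 'data' file
    print(parms['deltat'])            # flattened: no 'parm03' group lookup

    parms = getparm('data', flatten=False)
    print(parms['parm03']['deltat'])  # unflattened: group then parameter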
def test_dense_arrays(self):
    parser = f90nml.Parser()
    test_nml = parser.read('sparse_array.nml')
    self.assertEqual(self.dense_array_nml, test_nml)