def test_patch_case(self):
    patch_nml = f90nml.read('types_patch.nml')
    f90nml.patch('types_uppercase.nml', patch_nml, 'tmp.nml')
    test_nml = f90nml.read('tmp.nml')
    try:
        self.assertEqual(test_nml, patch_nml)
    finally:
        os.remove('tmp.nml')

def test_default_patch(self):
    patch_nml = f90nml.read("types_patch.nml")
    f90nml.patch("types.nml", patch_nml)
    test_nml = f90nml.read("types.nml~")
    try:
        self.assertEqual(test_nml, patch_nml)
    finally:
        os.remove("types.nml~")

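# A hedged sketch (not part of the test suite) of the patch call pattern
# exercised above, with assumed file names: f90nml.patch(target, patch, out)
# reads `target`, overrides values that also appear in `patch`, and writes
# the merged namelist to `out`.  Judging from test_default_patch, omitting
# `out` writes the result next to the input with a "~" suffix.
import f90nml

patch_nml = f90nml.read('types_patch.nml')           # values to apply
f90nml.patch('types.nml', patch_nml, 'patched.nml')  # explicit output path
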
def calc_species_param(species_list, species_filename, norm_filename):
    norm_file = f90nml.read(norm_filename)
    eBar = norm_file["normalizationParameters"]["eBar"]
    mBar = norm_file["normalizationParameters"]["mBar"]
    species_file = f90nml.read(species_filename)
    Z = numpy.array([species_file["speciesCharge"][x] for x in species_list]) / eBar
    mHat = numpy.array([species_file["speciesMass"][x] for x in species_list]) / mBar
    return [Z, mHat]

def prepare_wrfda_namelist(self, domain):
    # set domain specific workdir
    wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
    # read WRFDA namelist
    wrfda_namelist = os.path.join(self.config['filesystem']['wrfda_dir'],
                                  'var/test/tutorial/namelist.input')
    wrfda_nml = f90nml.read(wrfda_namelist)
    # read WRF namelist in WRF work_dir
    wrf_nml = f90nml.read(os.path.join(self.config['filesystem']['wrf_run_dir'],
                                       'namelist.input'))
    ## silent remove file if exists
    ##utils.silentremove(os.path.join(wrfda_workdir, 'fg'))
    ## create symlink of wrfinput_d0${domain}
    ##os.symlink(os.path.join(self.rundir, 'wrfinput_d0' + str(domain)),
    ##           os.path.join(wrfda_workdir, 'fg'))
    # set domain specific information in namelist
    for var in ['e_we', 'e_sn', 'e_vert', 'dx', 'dy']:
        # get variable from ${RUNDIR}/namelist.input
        var_value = wrf_nml['domains'][var]
        # set domain specific variable in WRFDA_WORKDIR/namelist.input
        wrfda_nml['domains'][var] = var_value[domain - 1]
    for var in ['mp_physics', 'ra_lw_physics', 'ra_sw_physics', 'radt',
                'sf_sfclay_physics', 'sf_surface_physics', 'bl_pbl_physics',
                'cu_physics', 'cudt', 'num_soil_layers']:
        # get variable from ${RUNDIR}/namelist.input
        var_value = wrf_nml['physics'][var]
        # set domain specific variable in WRFDA_WORKDIR/namelist.input
        try:
            wrfda_nml['physics'][var] = var_value[domain - 1]
        except TypeError:
            wrfda_nml['physics'][var] = var_value
    obsproc_nml = f90nml.read(os.path.join(self.obs[domain][0],
                                           'namelist.obsproc'))
    # sync wrfda namelist with obsproc namelist
    wrfda_nml['wrfvar18']['analysis_date'] = obsproc_nml['record2']['time_analysis']
    wrfda_nml['wrfvar21']['time_window_min'] = obsproc_nml['record2']['time_window_min']
    wrfda_nml['wrfvar22']['time_window_max'] = obsproc_nml['record2']['time_window_max']
    if self.check_cv5():
        wrfda_nml['wrfvar7']['cv_options'] = 5
        wrfda_nml['wrfvar6']['max_ext_its'] = 2
        wrfda_nml['wrfvar5']['check_max_iv'] = True
    else:
        wrfda_nml['wrfvar7']['cv_options'] = 3
    tana = utils.return_validate(obsproc_nml['record2']['time_analysis'][:-6])
    wrfda_nml['time_control']['start_year'] = tana.year
    wrfda_nml['time_control']['start_month'] = tana.month
    wrfda_nml['time_control']['start_day'] = tana.day
    wrfda_nml['time_control']['start_hour'] = tana.hour
    wrfda_nml['time_control']['end_year'] = tana.year
    wrfda_nml['time_control']['end_month'] = tana.month
    wrfda_nml['time_control']['end_day'] = tana.day
    wrfda_nml['time_control']['end_hour'] = tana.hour
    # save changes to wrfda_nml
    utils.silentremove(os.path.join(wrfda_workdir, 'namelist.input'))
    wrfda_nml.write(os.path.join(wrfda_workdir, 'namelist.input'))

def test_default_patch(self):
    patch_nml = f90nml.read('types_patch.nml')
    f90nml.patch('types.nml', patch_nml)
    test_nml = f90nml.read('types.nml~')
    try:
        self.assertEqual(test_nml, patch_nml)
    finally:
        os.remove('types.nml~')

    # The above behavior is only for paths, not files
    with open('types.nml') as nml_file:
        self.assertRaises(ValueError, f90nml.patch, nml_file, patch_nml)

def test_patch_files(self):
    patch_nml = f90nml.read('types_patch.nml')
    with open('types.nml') as f_in:
        with open('tmp.nml', 'w') as f_out:
            f90nml.patch(f_in, patch_nml, f_out)
            self.assertFalse(f_in.closed)
            self.assertFalse(f_out.closed)
    try:
        test_nml = f90nml.read('tmp.nml')
        self.assertEqual(test_nml, patch_nml)
    finally:
        os.remove('tmp.nml')

def read_pars(parpath):
    par_key = "parameter"
    name_key = "name"
    unit_key = "units"
    long_name_key = "long_name"
    out_name_key = "out_name"
    result = []
    parf = f90nml.read(parpath)
    pars = parf[par_key]
    for p in pars:
        name = p.get(name_key, None)
        if not name:
            print("ERROR: cannot read parameter without valid name from par-table")
            continue
        oname = p.get(out_name_key, None)
        if not oname:
            print("ERROR: cannot read parameter", name,
                  "without valid output name from par-table")
            continue
        lname = p.get(long_name_key, None)
        unit = p.get(unit_key, None)
        item = nemo_par(name)
        item.long_name = lname
        item.out_name = oname
        item.unit = unit
        result.append(item)
    print("parsing parameter table ", parpath)
    print("done, read", len(result), "entries")
    return result

def get_obsproc_dirs(self):
    ''' get list of observation names and workdirs for obsproc '''
    # read WRF namelist in WRF work_dir
    wrf_nml = f90nml.read(os.path.join(self.config['filesystem']['wrf_run_dir'],
                                       'namelist.input'))
    # initialize variables
    obsnames, obsproc_workdirs = [], []
    for dom in range(1, self.max_dom + 1):
        try:
            obsname = self.config['filesystem']['obs_filename_d' + str(dom)]
            obsnames.append(obsname)
            obsproc_workdirs.append(os.path.join(
                self.config['filesystem']['work_dir'], 'obsproc', obsname))
        except KeyError:
            obsname = self.config['filesystem']['obs_filename']
            obsnames.append(obsname)
            obsproc_workdirs.append(os.path.join(
                self.config['filesystem']['work_dir'], 'obsproc', obsname))
    # merge everything into a dict
    # domain: (workdir, obsname)
    obs = dict(zip(range(1, self.max_dom + 1),
                   zip(obsproc_workdirs, obsnames)))
    return obs

def test_bit_repro_repeat(self):
    """
    Test that a run reproduces saved checksums.
    """
    exp_bit_repo1 = setup_exp_from_base('1deg_jra55_iaf', '1deg_jra55_iaf_bit_repo1')
    exp_bit_repo2 = setup_exp_from_base('1deg_jra55_iaf', '1deg_jra55_iaf_bit_repo2')

    # Reconfigure to a 1 day restart period and run
    for exp in [exp_bit_repo1, exp_bit_repo2]:
        with open(exp.accessom2_config) as f:
            nml = f90nml.read(f)
        nml['date_manager_nml']['restart_period'] = [0, 0, 86400]
        nml.write(exp.accessom2_config, force=True)
        exp.build_and_run()

    # Compare expected to produced.
    assert os.path.exists(exp_bit_repo1.accessom2_out_000)
    expected = self.checksums_to_list(exp_bit_repo1.accessom2_out_000)
    expected.sort()

    assert os.path.exists(exp_bit_repo2.accessom2_out_000)
    produced = self.checksums_to_list(exp_bit_repo2.accessom2_out_000)
    produced.sort()

    if produced != expected:
        with open('checksums-produced-test_bit_repo.txt', 'w') as f:
            f.write('\n'.join(produced))
        with open('checksums-expected-test_bit_repo.txt', 'w') as f:
            f.write('\n'.join(expected))

    assert len(produced) > 0
    assert len(produced) == len(expected)
    assert produced == expected

def extract_input_file(self):
    """ Extract the GS2 input file from the NetCDF file to the run dir. """
    # Taken from extract_input_file in the GS2 scripts folder:
    # 1: Get the input_file variable from the netcdf file
    # 2: Only print lines between '${VAR} = "' and '" ;'
    #    (i.e. ignore header and footer)
    # 3: Convert \\n to new lines
    # 4: Delete empty lines
    # 5: Ignore first line
    # 6: Ignore last line
    # 7: Fix " style quotes
    # 8: Fix ' style quotes
    bash_extract_input = (""" ncdump -v input_file ${FILE} | """ +
                          """ sed -n '/input_file = /,/" ;/p' | """ +
                          """ sed 's|\\\\\\\\n|\\n|g' | """ +
                          """ sed '/^ *$/d' | """ +
                          """ tail -n+2 | """ +
                          """ head -n-2 | """ +
                          """ sed 's|\\\\\\"|\\"|g' | """ +
                          """ sed "s|\\\\\\'|\\'|g" """)
    os.system('FILE=' + self.cdf_file + '; ' + bash_extract_input +
              ' > ' + self.run_dir + 'input_file.in')
    self.gs2_in = nml.read(self.run_dir + 'input_file.in')

def set_model_pathnames(self):
    super(Cice, self).set_model_pathnames()

    self.build_exec_path = os.path.join(self.codebase_path,
                                        'build_access-om_360x300_6p')
    ice_nml_path = os.path.join(self.control_path, self.ice_nml_fname)
    self.ice_in = f90nml.read(ice_nml_path)

    # Assume local paths are relative to the work path
    setup_nml = self.ice_in['setup_nml']

    res_path = os.path.normpath(setup_nml['restart_dir'])
    if not os.path.isabs(res_path):
        res_path = os.path.join(self.work_path, res_path)
    self.work_init_path = res_path
    self.work_restart_path = res_path

    work_out_path = os.path.normpath(setup_nml['history_dir'])
    if not os.path.isabs(work_out_path):
        work_out_path = os.path.join(self.work_path, work_out_path)
    self.work_output_path = work_out_path

    # Determine if there is a work input path
    grid_nml = self.ice_in['grid_nml']
    input_path, _ = os.path.split(grid_nml['grid_file'])
    if input_path and not input_path == '.':
        assert not os.path.isabs(input_path)
        self.work_input_path = os.path.join(self.work_path, input_path)

    # Assert that kmt uses the same directory
    kmt_input_path, _ = os.path.split(grid_nml['kmt_file'])
    assert input_path == kmt_input_path

def test_bit_repro_historical(self):
    """
    Test that a run reproduces saved checksums.
    """
    exp_bit_repo = setup_exp_from_base('1deg_jra55_iaf', '1deg_jra55_iaf_bit_repo')

    # Reconfigure to a 1 day restart period and run
    with open(exp_bit_repo.accessom2_config) as f:
        nml = f90nml.read(f)
    nml['date_manager_nml']['restart_period'] = [0, 0, 86400]
    nml.write(exp_bit_repo.accessom2_config, force=True)
    exp_bit_repo.build_and_run()

    assert os.path.exists(exp_bit_repo.accessom2_out_000)
    produced = self.checksums_to_list(exp_bit_repo.accessom2_out_000)

    # Compare expected to produced.
    test_stdout = os.path.join(exp_bit_repo.exp_path, 'test', 'access-om2.out')
    assert os.path.exists(test_stdout)
    expected = self.checksums_to_list(test_stdout)

    assert len(produced) > 0
    for line in produced:
        if line not in expected:
            with open('checksums-produced-test_bit_repo.txt', 'w') as f:
                f.write('\n'.join(produced))
            with open('checksums-expected-test_bit_repo.txt', 'w') as f:
                f.write('\n'.join(expected))

def test_repatch(self):
    f90nml.patch('repatch.nml', self.repatch_nml, 'tmp.nml')
    test_nml = f90nml.read('tmp.nml')
    try:
        self.assertEqual(test_nml, self.repatch_nml)
    finally:
        os.remove('tmp.nml')

def test_colwidth(self):
    test_nml = f90nml.read("multiline.nml")
    test_nml.colwidth = 40
    self.assert_write(test_nml, "multiline_colwidth.nml")
    self.assertRaises(ValueError, setattr, test_nml, "colwidth", -1)
    self.assertRaises(TypeError, setattr, test_nml, "colwidth", "xyz")

def prepare_symlink_files(self, domain):
    ''' prepare WRFDA directory '''
    # set domain specific workdir
    wrfda_workdir = os.path.join(self.wrfda_workdir, "d0" + str(domain))
    # read obsproc namelist
    obsproc_nml = f90nml.read(os.path.join(self.obs[domain][0],
                                           'namelist.obsproc'))
    # symlink da_wrfvar.exe, LANDUSE.TBL, be.dat.cv3
    os.symlink(os.path.join(self.config['filesystem']['wrfda_dir'],
                            'var/da/da_wrfvar.exe'),
               os.path.join(wrfda_workdir, 'da_wrfvar.exe'))
    if self.check_cv5():
        # cv5
        os.symlink(self.config['options_wrfda']['be.dat_d0' + str(domain)],
                   os.path.join(wrfda_workdir, 'be.dat'))
    else:
        # cv3
        os.symlink(os.path.join(self.config['filesystem']['wrfda_dir'],
                                'var/run/be.dat.cv3'),
                   os.path.join(wrfda_workdir, 'be.dat'))
    os.symlink(os.path.join(self.config['filesystem']['wrfda_dir'],
                            'run/LANDUSE.TBL'),
               os.path.join(wrfda_workdir, 'LANDUSE.TBL'))
    # symlink output of obsproc
    os.symlink(os.path.join(self.obs[domain][0],
                            'obs_gts_' + obsproc_nml['record2']['time_analysis'] + '.3DVAR'),
               os.path.join(wrfda_workdir, 'ob.ascii'))

def test_column_width(self):
    test_nml = f90nml.read('multiline.nml')
    test_nml.column_width = 40
    self.assert_write(test_nml, 'multiline_colwidth.nml')
    self.assertRaises(ValueError, setattr, test_nml, 'column_width', -1)
    self.assertRaises(TypeError, setattr, test_nml, 'column_width', 'xyz')

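# assert_write is a helper defined elsewhere in this test suite.  A minimal
# sketch of what such a helper typically does (an assumption, not the actual
# implementation): write the namelist to a temporary file and compare it
# line by line against a reference file.
def assert_write(self, nml, target_fname):
    tmp_fname = 'tmp.nml'
    nml.write(tmp_fname)
    try:
        with open(tmp_fname) as tmp, open(target_fname) as target:
            self.assertEqual(tmp.readlines(), target.readlines())
    finally:
        os.remove(tmp_fname)
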
def setup(self):
    # FMS initialisation
    super(Mom, self).setup()

    input_nml_path = os.path.join(self.work_path, 'input.nml')
    input_nml = f90nml.read(input_nml_path)

    use_core2iaf = self.config.get('core2iaf')
    if use_core2iaf:
        self.core2iaf_setup()

    # Set the runtime
    if self.expt.runtime:
        ocean_solo_nml = input_nml['ocean_solo_nml']

        ocean_solo_nml['years'] = self.expt.runtime['years']
        ocean_solo_nml['months'] = self.expt.runtime['months']
        ocean_solo_nml['days'] = self.expt.runtime['days']
        ocean_solo_nml['seconds'] = self.expt.runtime.get('seconds', 0)

        input_nml.write(input_nml_path, force=True)

    # Construct the land CPU mask
    if self.expt.config.get('mask_table', False):
        self.create_mask_table(input_nml)

def _prepare_namelist(self, datestart, dateend):
    ''' prepare wps namelist '''
    # read WPS namelist in WPS work_dir
    wps_nml = f90nml.read(self.config['options_wps']['namelist.wps'])
    # get number of domains
    ndoms = wps_nml['share']['max_dom']
    # check if ndoms is an integer and > 0
    if not (isinstance(ndoms, int) and ndoms > 0):
        raise ValueError("'domains_max_dom' namelist variable should be an "
                         "integer > 0")
    # check if both datestart and dateend are a datetime instance
    if not all([isinstance(dt, datetime) for dt in [datestart, dateend]]):
        raise TypeError("datestart and dateend must be an instance of datetime")
    # set new datestart and dateend
    wps_nml['share']['start_date'] = [datetime.strftime(datestart,
                                                        '%Y-%m-%d_%H:%M:%S')] * ndoms
    wps_nml['share']['end_date'] = [datetime.strftime(dateend,
                                                      '%Y-%m-%d_%H:%M:%S')] * ndoms
    # write namelist in wps work_dir
    utils.silentremove(os.path.join(
        self.config['filesystem']['work_dir'], 'wps', 'namelist.wps'))
    wps_nml.write(os.path.join(
        self.config['filesystem']['work_dir'], 'wps', 'namelist.wps'))

def read_namelist(self):
    ''' read user supplied namelist '''
    self.nml = f90nml.read(self.namelist)
    # get list of namelist keys
    self.keys = self.nml.keys()

def get_value_from_input_or_defaults(self, groupname, varname):
    varname = varname.lower()
    groupname = groupname.lower()
    inputs = f90nml.read(self.input_name)
    if varname not in inputs[groupname].keys():
        return Sfincs_input.defaults[groupname][varname]
    else:
        return inputs[groupname][varname]

def set_timestep(self, timestep):
    input_nml_path = os.path.join(self.work_path, 'input.nml')
    input_nml = f90nml.read(input_nml_path)

    input_nml['ocean_model_nml']['dt_ocean'] = timestep

    input_nml.write(input_nml_path, force=True)

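# The snippet above is the read-modify-write pattern used throughout this
# section: parse a namelist into a dict-like Namelist, mutate it, and write
# it back.  force=True is needed because Namelist.write refuses to overwrite
# an existing file by default (see test_write_existing_file below).  A
# minimal self-contained sketch with an assumed file name and value:
import f90nml

nml = f90nml.read('input.nml')             # Namelist behaves like a nested dict
nml['ocean_model_nml']['dt_ocean'] = 900   # update a single value
nml.write('input.nml', force=True)         # force=True allows overwriting
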
def create_species(normalization, filename="species",
                   database_filename=os.path.abspath(__file__).rsplit("/", 1)[0] +
                   "/species_database.namelist"):
    """ creates a Species object from a .csv file containing species,
    and a Normalization object """
    filename = "." + "/" + filename
    names = [x.strip() for x in open(filename, 'r').read().split('\n')[0].split(',')]
    database = f90nml.read(database_filename)
    Zs = np.array([database["speciesCharge"][x] for x in names]) / normalization.eBar
    mHats = np.array([database["speciesMass"][x] for x in names]) / normalization.mBar
    return Species(Zs, mHats, names)

def set_timestep(self, t_step):
    namcpl_path = os.path.join(self.work_path, 'namcouple')
    namcpl = Namcouple(namcpl_path, 'access')
    namcpl.set_ice_ocean_coupling_timestep(str(t_step))
    namcpl.write()

    for model in self.expt.models:

        if model.model_type in ('cice', 'cice5'):

            # Set namcouple timesteps
            ice_ts = model.config.get('timestep')
            if ice_ts:
                model.set_oasis_timestep(ice_ts)

            # Set ACCESS coupler timesteps
            input_ice_path = os.path.join(model.work_path, 'input_ice.nml')
            input_ice = f90nml.read(input_ice_path)
            input_ice['coupling_nml']['dt_cpl_io'] = t_step
            input_ice.write(input_ice_path, force=True)

        elif model.model_type == 'matm':
            input_atm_path = os.path.join(model.work_path, 'input_atm.nml')
            input_atm = f90nml.read(input_atm_path)
            input_atm['coupling']['dt_atm'] = t_step
            input_atm.write(input_atm_path, force=True)

        elif model.model_type == 'mom':
            input_nml_path = os.path.join(model.work_path, 'input.nml')
            input_nml = f90nml.read(input_nml_path)
            input_nml['auscom_ice_nml']['dt_cpl'] = t_step
            input_nml['ocean_solo_nml']['dt_cpld'] = t_step
            input_nml.write(input_nml_path, force=True)

def obsproc_init(self, datestart):
    ''' Sync obsproc namelist with WRF namelist.input '''
    from shutil import copyfile
    from datetime import timedelta
    from datetime import datetime
    # convert to unique list
    obslist = list(set(self.obs.values()))
    # read WRF namelist in WRF work_dir
    wrf_nml = f90nml.read(os.path.join(self.config['filesystem']['wrf_run_dir'],
                                       'namelist.input'))
    for obs in obslist:
        # read obsproc namelist
        obsproc_nml = f90nml.read(os.path.join(self.obsproc_dir,
                                               'namelist.obsproc.3dvar.wrfvar-tut'))
        # create obsproc workdir
        self.create_obsproc_dir(obs[0])
        # copy observation in LITTLE_R format to obsproc_dir
        shutil.copyfile(os.path.join(self.config['filesystem']['obs_dir'], obs[1]),
                        os.path.join(obs[0], obs[1]))
        # sync obsproc namelist variables with wrf namelist.input
        obsproc_nml['record1']['obs_gts_filename'] = obs[1]
        obsproc_nml['record8']['nesti'] = wrf_nml['domains']['i_parent_start']
        obsproc_nml['record8']['nestj'] = wrf_nml['domains']['j_parent_start']
        obsproc_nml['record8']['nestix'] = wrf_nml['domains']['e_we']
        obsproc_nml['record8']['nestjx'] = wrf_nml['domains']['e_sn']
        obsproc_nml['record8']['numc'] = wrf_nml['domains']['parent_id']
        obsproc_nml['record8']['dis'] = wrf_nml['domains']['dx']
        obsproc_nml['record8']['maxnes'] = wrf_nml['domains']['max_dom']
        # set time_analysis, time_window_min, time_window_max
        # check if datestart is a datetime instance
        if not isinstance(datestart, datetime):
            raise TypeError("datestart must be an instance of datetime")
        obsproc_nml['record2']['time_analysis'] = datetime.strftime(
            datestart, '%Y-%m-%d_%H:%M:%S')
        obsproc_nml['record2']['time_window_min'] = datetime.strftime(
            datestart - timedelta(minutes=15), '%Y-%m-%d_%H:%M:%S')
        obsproc_nml['record2']['time_window_max'] = datetime.strftime(
            datestart + timedelta(minutes=15), '%Y-%m-%d_%H:%M:%S')
        # save obsproc_nml
        utils.silentremove(os.path.join(obs[0], 'namelist.obsproc'))
        obsproc_nml.write(os.path.join(obs[0], 'namelist.obsproc'))

def init_config(self):
    """Patch input.nml as a new or restart run."""
    input_fpath = os.path.join(self.work_path, 'input.nml')
    input_nml = f90nml.read(input_fpath)

    input_type = 'n' if self.expt.counter == 0 else 'r'
    input_nml['GOLD_input_nml']['input_filename'] = input_type

    f90nml.write(input_nml, input_fpath, force=True)

def set_access_timestep(self, t_step):
    # TODO: Figure out some way to move this to the ACCESS driver
    # Re-read ice timestep and move this over there
    self.set_local_timestep(t_step)

    input_ice_path = os.path.join(self.work_path, 'input_ice.nml')
    input_ice = f90nml.read(input_ice_path)
    input_ice['coupling_nml']['dt_cice'] = t_step
    input_ice.write(input_ice_path, force=True)

def read(self, filename, clear=False):
    """ Read input from file """
    from f90nml import read
    from ..misc import local_path

    if clear:
        self.__inputs.clear()
    filename = local_path(filename)
    dictionary = read(str(filename))
    for key, value in dictionary.items():
        setattr(self, key, value)

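# f90nml.read returns a Namelist, an ordered mapping of group names to inner
# Namelists of variables (names are lower-cased on read), which is why the
# loop above can iterate with .items().  A hedged sketch with an assumed
# two-group file name:
import f90nml

nml = f90nml.read('config.nml')          # e.g. a file with &grid and &physics groups
for group, variables in nml.items():
    print(group)                         # group name
    for var, value in variables.items():
        print(' ', var, '=', value)      # variable name and parsed Python value
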
def get_max_dom():
    ''' get maximum domain number from WRF namelist.input '''
    import f90nml
    from config import config
    # load config
    CONFIG = config()
    wrf_nml = f90nml.read(os.path.join(CONFIG.config['filesystem']['wrf_run_dir'],
                                       'namelist.input'))
    # maximum domain number
    return wrf_nml['domains']['max_dom']

def test_indent(self):
    test_nml = f90nml.read('types.nml')

    test_nml.indent = 2
    self.assert_write(test_nml, 'types_indent_2.nml')

    test_nml.indent = '\t'
    self.assert_write(test_nml, 'types_indent_tab.nml')

    self.assertRaises(ValueError, setattr, test_nml, 'indent', -4)
    self.assertRaises(ValueError, setattr, test_nml, 'indent', 'xyz')
    self.assertRaises(TypeError, setattr, test_nml, 'indent', [1, 2, 3])

def test_print_group(self):
    nml = f90nml.read('types.nml')

    stdout = StringIO()
    print(nml['types_nml'], file=stdout)
    stdout.seek(0)
    source_str = stdout.read().rstrip('\n')
    stdout.close()

    target_str = repr(nml['types_nml'])
    self.assertEqual(source_str, target_str)

def test_multidim(self):
    test_nml = f90nml.read('multidim.nml')
    self.assertEqual(self.multidim_nml, test_nml)
    self.assert_write(test_nml, 'multidim_target.nml')

del get_versions

# default parameters; cannot be changed after module load
_magiccpath, _magiccbinary = MAGICC6().original_dir, MAGICC6().original_dir

if not _config['is_windows']:
    wine_installed = subprocess.call("type wine", shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE) == 0
    if not wine_installed:
        logging.warning("Wine is not installed")

_config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            "default_config.nml")
default_config = f90nml.read(_config_path)

# MAGICC's scenario files encode the used regions as follows.
region_codes = {
    11: ['WORLD'],
    20: ['WORLD', "OECD90", "REF", "ASIA", "ALM"],
    21: ['WORLD', "OECD90", "REF", "ASIA", "ALM"],
    31: ['WORLD', "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM"],
    41: ['WORLD', "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM", "BUNKERS"],
}

# Order of columns to use when writing scenario files.
_columns = [
    u'YEARS', u'FossilCO2', u'OtherCO2', u'CH4', u'N2O', u'SOx', u'CO',
    u'NMVOC', u'NOx', u'BC', u'OC', u'NH3', u'CF4', u'C2F6', u'C6F14',
    u'HFC23', u'HFC32', u'HFC43-10', u'HFC125', u'HFC134a', u'HFC143a',

def test_end_comma(self):
    test_nml = f90nml.read('types.nml')
    test_nml.end_comma = True
    self.assert_write(test_nml, 'types_end_comma.nml')

    self.assertRaises(TypeError, setattr, test_nml, 'end_comma', 'xyz')

from datetime import datetime, timedelta
import pathlib
from typing import Union

import f90nml  # type: ignore[import]
import pytz

from pyschism.forcing.atmosphere.nws import NWS
from pyschism.forcing.atmosphere.nws.nws2 import NWS2
from pyschism.enums import Coriolis

PARAM_TEMPLATE = pathlib.Path(__file__).parent / 'param.nml.template'
PARAM_DEFAULTS = f90nml.read(PARAM_TEMPLATE)['opt']


class OptMeta(type):

    def __new__(meta, name, bases, attrs):
        for key, value in PARAM_DEFAULTS.items():
            if key not in attrs:
                if isinstance(value, list):
                    attrs[key] = len(value) * [0]
                else:
                    attrs[key] = None
        return type(name, bases, attrs)


class Dramp:

    def __set__(self, obj, dramp: Union[int, float, timedelta, None]):
        if not isinstance(dramp, (int, float, timedelta, type(None))):
            raise TypeError("Argument drampbc must be an int, float, "

def execute(args, job_args):
    """
    Executes a weather/fire simulation.

    :param args: a dictionary with the following keys, needed to start the simulation
    :param job_args: the original json given to the forecast

    Keys in args:
    :param grid_code: the (unique) code of the grid that is used
    :param sys_install_path: system installation directory
    :param start_utc: start time of simulation in UTC
    :param end_utc: end time of simulation in UTC
    :param workspace_path: workspace directory
    :param wps_install_path: installation directory of WPS that will be used
    :param wrf_install_path: installation directory of WRF that will be used
    :param grib_source: a string identifying a valid GRIB2 source
    :param wps_namelist_path: the path to the namelist.wps file that will be used as template
    :param wrf_namelist_path: the path to the namelist.input file that will be used as template
    :param fire_namelist_path: the path to the namelist.fire file that will be used as template
    :param wps_geog_path: the path to the geogrid data directory providing terrain/fuel data
    :param email_notification: dictionary containing keys address and events indicating
                               when a mail should be fired off
    """
    # step 0: initialize the job state from the arguments
    js = JobState(args)

    jobdir = osp.abspath(osp.join(js.workspace_path, js.job_id))
    make_clean_dir(jobdir)

    json.dump(job_args, open(osp.join(jobdir, 'input.json'), 'w'),
              indent=4, separators=(',', ': '))
    jsub = make_job_file(js)
    json.dump(jsub, open(jsub.jobfile, 'w'), indent=4, separators=(',', ': '))

    logging.info("job %s starting [%d hours to forecast]." % (js.job_id, js.fc_hrs))
    sys.stdout.flush()
    send_email(js, 'start', 'Job %s started.' % js.job_id)

    # read in all namelists
    js.wps_nml = f90nml.read(js.args['wps_namelist_path'])
    js.wrf_nml = f90nml.read(js.args['wrf_namelist_path'])
    js.fire_nml = f90nml.read(js.args['fire_namelist_path'])
    js.ems_nml = None
    if 'emissions_namelist_path' in js.args:
        js.ems_nml = f90nml.read(js.args['emissions_namelist_path'])

    # Parse and setup the domain configuration
    js.domain_conf = WPSDomainConf(js.domains)

    num_doms = len(js.domain_conf)
    js.wps_nml['share']['start_date'] = [utc_to_esmf(js.start_utc)] * num_doms
    js.wps_nml['share']['end_date'] = [utc_to_esmf(js.end_utc)] * num_doms
    js.wps_nml['share']['interval_seconds'] = 3600

    logging.info("number of domains defined is %d." % num_doms)

    # build directories in workspace
    js.wps_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wps'))
    js.wrf_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wrf'))

    #check_obj(args, 'args')
    #check_obj(js, 'Initial job state')

    # step 1: clone WPS and WRF directories
    logging.info("cloning WPS into %s" % js.wps_dir)
    cln = WRFCloner(js.args)
    cln.clone_wps(js.wps_dir, js.grib_source.vtables(), [])

    # step 2: process domain information and patch namelist for geogrid
    js.wps_nml['geogrid']['geog_data_path'] = js.args['wps_geog_path']
    js.domain_conf.prepare_for_geogrid(js.wps_nml, js.wrf_nml, js.wrfxpy_dir, js.wps_dir)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    # do steps 2 & 3 & 4 in parallel (two execution streams)
    #  -> GEOGRID ->
    #  -> GRIB2 download -> UNGRIB ->
    proc_q = Queue()
    geogrid_proc = Process(target=run_geogrid, args=(js, proc_q))
    grib_proc = Process(target=retrieve_gribs_and_run_ungrib, args=(js, proc_q))

    logging.info('starting GEOGRID and GRIB2/UNGRIB')
    geogrid_proc.start()
    grib_proc.start()

    # wait until both tasks are done
    logging.info('waiting until both tasks are done')
    grib_proc.join()
    geogrid_proc.join()

    if proc_q.get() != 'SUCCESS':
        return

    if proc_q.get() != 'SUCCESS':
        return

    proc_q.close()

    # step 5: execute metgrid after ensuring all grids will be processed
    js.domain_conf.prepare_for_metgrid(js.wps_nml)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    logging.info("running METGRID")
    Metgrid(js.wps_dir).execute().check_output()

    send_email(js, 'metgrid', 'Job %s - metgrid complete.' % js.job_id)
    logging.info("cloning WRF into %s" % js.wrf_dir)

    # step 6: clone wrf directory, symlink all met_em* files, make namelists
    cln.clone_wrf(js.wrf_dir, [])
    symlink_matching_files(js.wrf_dir, js.wps_dir, "met_em*")

    time_ctrl = update_time_control(js.start_utc, js.end_utc, num_doms)
    js.wrf_nml['time_control'].update(time_ctrl)
    update_namelist(js.wrf_nml, js.grib_source.namelist_keys())
    if 'ignitions' in js.args:
        update_namelist(js.wrf_nml, render_ignitions(js, num_doms))

    # if we have an emissions namelist, automatically turn on the tracers
    if js.ems_nml is not None:
        logging.debug('namelist.fire_emissions given, turning on tracers')
        f90nml.write(js.ems_nml, osp.join(js.wrf_dir, 'namelist.fire_emissions'),
                     force=True)
        js.wrf_nml['dynamics']['tracer_opt'] = [2] * num_doms

    f90nml.write(js.wrf_nml, osp.join(js.wrf_dir, 'namelist.input'), force=True)
    f90nml.write(js.fire_nml, osp.join(js.wrf_dir, 'namelist.fire'), force=True)

    # step 7: execute real.exe
    logging.info("running REAL")
    # try to run Real twice as it sometimes fails the first time
    # it's not clear why this error happens
    try:
        Real(js.wrf_dir).execute().check_output()
    except Exception as e:
        logging.error('Real step failed with exception %s, retrying ...' % str(e))
        Real(js.wrf_dir).execute().check_output()

    # step 7b: if requested, do fuel moisture DA
    if js.fmda is not None:
        logging.info('running fuel moisture data assimilation')
        for dom in js.fmda.domains:
            assimilate_fm10_observations(
                osp.join(js.wrf_dir, 'wrfinput_d%02d' % dom), None, js.fmda.token)

    # step 8: execute wrf.exe on parallel backend
    logging.info('submitting WRF job')
    send_email(js, 'wrf_submit', 'Job %s - wrf job submitted.' % js.job_id)

    js.task_id = "sim-" + js.grid_code + "-" + utc_to_esmf(js.start_utc)[:10]
    jsub.job_num = WRF(js.wrf_dir, js.qsys).submit(js.task_id, js.num_nodes,
                                                   js.ppn, js.wall_time_hrs)

    send_email(js, 'wrf_exec',
               'Job %s - wrf job starting now with id %s.' % (js.job_id, js.task_id))
    logging.info("WRF job %s submitted with id %s, waiting for rsl.error.0000"
                 % (jsub.job_num, js.task_id))

    jobfile = osp.abspath(osp.join(js.workspace_path, js.job_id, 'job.json'))
    json.dump(jsub, open(jobfile, 'w'), indent=4, separators=(',', ': '))

    process_output(js.job_id)

def test_unset(self):
    test_nml = f90nml.read('unset.nml')
    self.assertEqual(self.unset_nml, test_nml)
    self.assert_write(test_nml, 'unset.nml')

def test_pop_key(self):
    test_nml = f90nml.read('empty.nml')
    test_nml.pop('empty_nml')
    self.assertEqual(test_nml, f90nml.namelist.Namelist())

def test_grp_repeat(self):
    test_nml = f90nml.read('grp_repeat.nml')
    self.assertEqual(self.grp_repeat_nml, test_nml)
    self.assert_write(test_nml, 'grp_repeat_target.nml')

def test_comment(self):
    test_nml = f90nml.read('comment.nml')
    self.assertEqual(self.comment_nml, test_nml)
    self.assert_write(test_nml, 'comment_target.nml')

def test_dollar(self):
    test_nml = f90nml.read('dollar.nml')
    self.assertEqual(self.dollar_nml, test_nml)
    self.assert_write(test_nml, 'dollar_target.nml')

def test_f77(self):
    test_nml = f90nml.read('f77.nml')
    self.assertEqual(self.f77_nml, test_nml)
    self.assert_write(test_nml, 'f77_target.nml')

def test_multiline_index(self):
    test_nml = f90nml.read('multiline_index.nml')
    self.assertEqual(self.multiline_nml, test_nml)
    self.assert_write(test_nml, 'multiline_index.nml')

def test_ext_token(self):
    test_nml = f90nml.read('ext_token.nml')
    self.assertEqual(self.ext_token_nml, test_nml)

def test_write_existing_file(self):
    tmp_fname = 'tmp.nml'
    open(tmp_fname, 'w').close()
    test_nml = f90nml.read('empty.nml')
    self.assertRaises(IOError, test_nml.write, tmp_fname)
    os.remove(tmp_fname)

def test_vector(self):
    test_nml = f90nml.read('vector.nml')
    self.assertEqual(self.vector_nml, test_nml)
    self.assert_write(test_nml, 'vector_target.nml')

def test_null(self):
    test_nml = f90nml.read('null.nml')
    self.assertEqual(self.null_nml, test_nml)
    self.assert_write(test_nml, 'null_target.nml')

def test_types(self):
    test_nml = f90nml.read('types.nml')
    self.assertEqual(self.types_nml, test_nml)
    self.assert_write(test_nml, 'types.nml')

def test_empty_nml(self):
    test_nml = f90nml.read('empty.nml')
    self.assertEqual(self.empty_nml, test_nml)
    self.assert_write(test_nml, 'empty.nml')

def test_bcast(self):
    test_nml = f90nml.read('bcast.nml')
    self.assertEqual(self.bcast_nml, test_nml)
    self.assert_write(test_nml, 'bcast_target.nml')

print(" USAGE: {0:s} <File1> <File2>".format(ScriptName)) print( " USAGE: <File1> and <File2> are the paths to the two RAMSIN files being compared" ) sys.exit(1) RamsFname1 = sys.argv[1] RamsFname2 = sys.argv[2] print("Comparing two RAMSIN files:") print(" File1: {0:s}".format(RamsFname1)) print(" File2: {0:s}".format(RamsFname2)) print("") # Apply the parser to the two files Rnml1 = f90nml.read(RamsFname1) Rnml2 = f90nml.read(RamsFname2) # Data is stored in python dictionary form. The first level of keys are # the namelist sections (model_grid, model_file_info, etc.), and the # second level of keys are the varibles within each section. # Get the group names Rnml1Groups = list(Rnml1.keys()) Rnml2Groups = list(Rnml2.keys()) # Find the common groups and those unique to each file groups Only1Groups, Only2Groups, CommGroups = FindListDiffs(Rnml1Groups, Rnml2Groups) print("Namelist groups unique to File1:") for Group in Only1Groups:
def test_dtype_case(self):
    test_nml = f90nml.read('dtype_case.nml')
    self.assertEqual(self.dtype_case_nml, test_nml)
    self.assert_write(test_nml, 'dtype_case_target.nml')

def test_string_multiline(self):
    test_nml = f90nml.read('string_multiline.nml')
    self.assertEqual(self.string_multiline_nml, test_nml)

def test_empty_file(self):
    test_nml = f90nml.read('empty_file')
    self.assertEqual(self.empty_file, test_nml)

def test_string(self):
    test_nml = f90nml.read('string.nml')
    self.assertEqual(self.string_nml, test_nml)
    self.assert_write(test_nml, 'string_target.nml')

import scipy.io.netcdf as netcdf
import f90nml

plt.ion()

dir0 = '../run/'
file1 = 'state.0000000000.t001.nc'

if len(sys.argv) > 1:
    dir1 = dir0 + 'mnc_test_' + str(format(sys.argv[1])).zfill(4) + '/'

# physical parameters
gg = 9.81       # m/s^2
sbeta = 7.4e-4  # psu^-1

nml = f90nml.read(dir0 + 'data')
nmldiag = f90nml.read(dir0 + 'data.diagnostics')

dt = nml['parm03']['dumpfreq']

f1 = netcdf.netcdf_file(dir1 + file1, 'r')

tt = f1.variables['T'][:].copy()
xx = f1.variables['X'][:].copy()
zz = f1.variables['Z'][:].copy()

si_t = len(tt)
si_x = len(xx)
si_z = len(zz)

s = f1.variables['S'][:, :, :, :].copy().squeeze()

def test_float(self):
    test_nml = f90nml.read('float.nml')
    self.assertEqual(self.float_nml, test_nml)
    self.assert_write(test_nml, 'float_target.nml')

def setup(self):

    if not self.top_level_model:
        return

    cpl_keys = {'cice': ('input_ice.nml', 'coupling', 'runtime0'),
                'matm': ('input_atm.nml', 'coupling', 'truntime0')}

    # Keep track of this in order to set the oasis runtime.
    run_runtime = 0

    for model in self.expt.models:
        if model.model_type == 'cice' or model.model_type == 'cice5':

            # Horrible hack to make a link to o2i.nc in the
            # work/ice/RESTART directory
            f_name = 'o2i.nc'
            f_src = os.path.join(model.work_path, f_name)
            f_dst = os.path.join(model.work_restart_path, f_name)

            if os.path.isfile(f_src):
                make_symlink(f_src, f_dst)

        if model.model_type == 'cice5':

            # Stage the supplemental input files
            if model.prior_restart_path:
                for f_name in model.access_restarts:
                    f_src = os.path.join(model.prior_restart_path, f_name)
                    f_dst = os.path.join(model.work_input_path, f_name)

                    if os.path.isfile(f_src):
                        make_symlink(f_src, f_dst)

        if model.model_type in ('cice', 'matm'):

            # Update the supplemental OASIS namelists
            cpl_fname, cpl_group, runtime0_key = cpl_keys[model.model_type]

            cpl_fpath = os.path.join(model.work_path, cpl_fname)
            cpl_nml = f90nml.read(cpl_fpath)

            # Which calendar are we using, noleap or Gregorian.
            caltype = cpl_nml[cpl_group]['caltype']

            init_date = cal.int_to_date(cpl_nml[cpl_group]['init_date'])

            # Get time info about the beginning of this run. We're
            # interested in:
            # 1. start date of run
            # 2. total runtime of all previous runs.
            if model.prior_restart_path and not self.expt.repeat_run:

                prior_cpl_fpath = os.path.join(model.prior_restart_path, cpl_fname)

                # With later versions this file exists in the prior restart
                # path, but this was not always the case, so check, and if
                # not there use prior output path
                if not os.path.exists(prior_cpl_fpath):
                    print('payu: warning: {0} missing from prior restart '
                          'path; checking prior output.'.format(cpl_fname),
                          file=sys.stderr)
                    if not os.path.isdir(model.prior_output_path):
                        print('payu: error: No prior output path; '
                              'aborting run.')
                        sys.exit(errno.ENOENT)
                    prior_cpl_fpath = os.path.join(model.prior_output_path, cpl_fname)

                try:
                    prior_cpl_nml = f90nml.read(prior_cpl_fpath)
                except IOError as exc:
                    if exc.errno == errno.ENOENT:
                        print('payu: error: {0} does not exist; aborting.'
                              ''.format(prior_cpl_fpath), file=sys.stderr)
                        sys.exit(exc.errno)
                    else:
                        raise

                cpl_nml_grp = prior_cpl_nml[cpl_group]

                # The total time in seconds since the beginning of
                # the experiment.
                total_runtime = int(cpl_nml_grp[runtime0_key] +
                                    cpl_nml_grp['runtime'])
                run_start_date = cal.date_plus_seconds(init_date,
                                                       total_runtime,
                                                       caltype)
            else:
                total_runtime = 0
                run_start_date = init_date

            # Get new runtime for this run. We get this from either the
            # 'runtime' part of the payu config, or from the namelist
            if self.expt.runtime:
                run_runtime = cal.runtime_from_date(
                    run_start_date,
                    self.expt.runtime['years'],
                    self.expt.runtime['months'],
                    self.expt.runtime['days'],
                    self.expt.runtime.get('seconds', 0),
                    caltype)
            else:
                run_runtime = cpl_nml[cpl_group]['runtime']

            # Now write out new run start date and total runtime.
            cpl_nml[cpl_group]['inidate'] = cal.date_to_int(run_start_date)
            cpl_nml[cpl_group][runtime0_key] = total_runtime
            cpl_nml[cpl_group]['runtime'] = int(run_runtime)

            if model.model_type == 'cice':
                if self.expt.counter and not self.expt.repeat_run:
                    cpl_nml[cpl_group]['jobnum'] = 1 + self.expt.counter
                else:
                    cpl_nml[cpl_group]['jobnum'] = 1

            nml_work_path = os.path.join(model.work_path, cpl_fname)
            f90nml.write(cpl_nml, nml_work_path + '~')
            shutil.move(nml_work_path + '~', nml_work_path)

    # Now change the oasis runtime. This needs to be done after the others.
    for model in self.expt.models:
        if model.model_type == 'oasis':
            namcouple = os.path.join(model.work_path, 'namcouple')

            s = ''
            with open(namcouple, 'r+') as f:
                s = f.read()

            m = re.search(r"^[ \t]*\$RUNTIME.*?^[ \t]*(\d+)", s,
                          re.MULTILINE | re.DOTALL)
            assert m is not None
            s = s[:m.start(1)] + str(run_runtime) + s[m.end(1):]

            with open(namcouple, 'w') as f:
                f.write(s)

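# For reference, the $RUNTIME section that the regex above targets looks
# roughly like this in an OASIS namcouple file (a hedged sketch, not taken
# from a real configuration); the regex captures the first bare integer
# after the $RUNTIME keyword and replaces it with the new runtime:
#
#  $RUNTIME
#  # total simulated time for this run, in seconds
#    86400
#  $END
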
def test_rowmaj_multidim(self):
    test_nml = f90nml.read('multidim.nml', row_major=True)
    self.assertEqual(self.md_rowmaj_nml, test_nml)

def determine_namelist_differences(
        source_namelist,
        target_namelist,
        bad_chapters=["set_stream", "mvstreamctl", "set_stream_element"],
):
    """
    Determines differences in namelists

    Parameters
    ----------
    source_namelist : str
        The "source" namelist to use. This is "yours".
    target_namelist : str
        The "target" namelist to use. This is "the other one".
    bad_chapters : list
        A list of strings with chapter names that should not be compared.

    Returns
    -------
    namelist_diffs : list
        A list of strings containing diff information; to be printed later.
    """
    source_nml = f90nml.read(source_namelist)
    target_nml = f90nml.read(target_namelist)

    for chapter in bad_chapters:
        for this_nml in source_nml, target_nml:
            if chapter in this_nml:
                del this_nml[chapter]

    common_chapters, unique_source_chapters, unique_target_chapters = \
        determine_identical_and_unique_elements(source_nml, target_nml)

    namelist_diffs = ["\n", os.path.basename(source_namelist)]
    namelist_diffs.append(80 * "-")

    for this_chapter in common_chapters:
        namelist_diffs.append("&" + this_chapter)
        entry_diffs = []
        common_entries, unique_source_entries, unique_target_entries = \
            determine_identical_and_unique_elements(source_nml[this_chapter],
                                                    target_nml[this_chapter])
        for this_entry in common_entries:
            source_nml_value = source_nml[this_chapter][this_entry]
            target_nml_value = target_nml[this_chapter][this_entry]
            if source_nml_value != target_nml_value:
                entry_diffs.append("\t\t Source: %s: %s" % (this_entry, source_nml_value))
                entry_diffs.append("\t\t Target: %s: %s" % (this_entry, target_nml_value))
        if unique_source_entries:
            entry_diffs.append("\n\t\t Unique to Source:")
            for this_entry in unique_source_entries:
                entry_diffs.append("\t\t %s: %s"
                                   % (this_entry, source_nml[this_chapter][this_entry]))
        if unique_target_entries:
            entry_diffs.append("\n\t\t Unique to Target:")
            for this_entry in unique_target_entries:
                entry_diffs.append("\t\t %s: %s"
                                   % (this_entry, target_nml[this_chapter][this_entry]))
        if entry_diffs:
            namelist_diffs += entry_diffs
        else:
            namelist_diffs.append("\n\t\t All entries are the same!")
        namelist_diffs.append("\\")

    for unique_chapters, nml, tag in zip(
            [unique_source_chapters, unique_target_chapters],
            [source_nml, target_nml],
            ["Source", "Target"],
    ):
        if unique_chapters:
            for chapter in unique_chapters:
                namelist_diffs.append("\n\t\t The following chapter is unique to %s" % tag)
                namelist_diffs.append("&" + chapter)
                for entry, value in nml[chapter].items():
                    namelist_diffs.append("\t\t %s: %s" % (entry, value))
    return namelist_diffs

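# A hedged usage sketch for the diff helper above; the file names are
# hypothetical, and determine_identical_and_unique_elements must be provided
# elsewhere in the same module.
diffs = determine_namelist_differences('namelist.echam', 'namelist.echam.other')
print('\n'.join(diffs))
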
def test_no_selfpatch(self):
    patch_nml = f90nml.read('types_patch.nml')
    self.assertRaises(ValueError, f90nml.patch,
                      'types.nml', patch_nml, 'types.nml')
