def test_comment_patch(self):
    nml = {'comment_nml': {'v_cmt_inline': 456}}
    try:
        f90nml.patch('comment.nml', nml, 'tmp.nml')
        self.assert_file_equal('comment_patch.nml', 'tmp.nml')
    finally:
        os.remove('tmp.nml')
def test_repatch(self):
    f90nml.patch('repatch.nml', self.repatch_nml, 'tmp.nml')
    test_nml = f90nml.read('tmp.nml')
    try:
        self.assertEqual(test_nml, self.repatch_nml)
    finally:
        os.remove('tmp.nml')
def test_patch_case(self):
    patch_nml = f90nml.read('types_patch.nml')
    f90nml.patch('types_uppercase.nml', patch_nml, 'tmp.nml')
    test_nml = f90nml.read('tmp.nml')
    try:
        self.assertEqual(test_nml, patch_nml)
    finally:
        os.remove('tmp.nml')
def run_model():
    MPAS_SEAICE_TESTCASES_RUN_COMMAND = os.environ.get(
        'MPAS_SEAICE_TESTCASES_RUN_COMMAND')
    if MPAS_SEAICE_TESTCASES_RUN_COMMAND is None:
        MPAS_SEAICE_TESTCASES_RUN_COMMAND = ""

    advectionMethods = ["IR", "upwind"]
    #advectionMethods = ["IR"]

    icTypes = ["cosine_bell", "slotted_cylinder"]
    #icTypes = ["cosine_bell"]

    gridSizes = [2562, 10242, 40962, 163842]
    #gridSizes = [2562]

    for advectionMethod in advectionMethods:
        print("Advection method: ", advectionMethod)
        for icType in icTypes:
            print("  IC type: ", icType)
            for gridSize in gridSizes:
                print("    Gridsize: ", gridSize)

                os.system("rm grid.nc ic.nc namelist.seaice")
                os.system("ln -s grid.%i.nc grid.nc" % (gridSize))
                os.system("ln -s ic_%s_%i.nc ic.nc" % (icType, gridSize))

                if advectionMethod == "IR":
                    nmlPatch = {"advection": {
                        "config_advection_type": "incremental_remap"}}
                elif advectionMethod == "upwind":
                    nmlPatch = {"advection": {
                        "config_advection_type": "upwind"}}

                f90nml.patch("namelist.seaice.advection", nmlPatch,
                             "namelist.seaice.%s" % (advectionMethod))

                os.system("rm -rf output_%s_%s_%i" %
                          (advectionMethod, icType, gridSize))
                os.system("ln -s namelist.seaice.%s namelist.seaice" %
                          (advectionMethod))
                os.system("%s ../../../seaice_model" %
                          (MPAS_SEAICE_TESTCASES_RUN_COMMAND))
                os.system("mv output output_%s_%s_%i" %
                          (advectionMethod, icType, gridSize))
def create_new_namelist(filenameIn, filenameOut, nmlPatch):
    try:
        import f90nml
    except ImportError:
        print("Module f90nml needed and not available")
        sys.exit()
    f90nml.patch(filenameIn, nmlPatch, filenameOut)
def write(self, path, overwrite=False, use_template=False):
    path = pathlib.Path(path)
    if path.is_file() and not overwrite:
        raise IOError(f"File {path} exists and overwrite=False")
    if use_template:
        f90nml.patch(PARAM_TEMPLATE, self.to_dict(), path)
    else:
        with open(path, 'w') as f:
            f.write(str(self))
def run_model():
    MPAS_SEAICE_TESTCASES_RUN_COMMAND = os.environ.get(
        'MPAS_SEAICE_TESTCASES_RUN_COMMAND')
    if MPAS_SEAICE_TESTCASES_RUN_COMMAND is None:
        MPAS_SEAICE_TESTCASES_RUN_COMMAND = ""

    operatorMethods = ["wachspress", "pwl", "weak"]
    gridTypes = ["hex"]

    for gridType in gridTypes:
        print("Grid type: ", gridType)
        for operatorMethod in operatorMethods:
            print("  Operator Method: ", operatorMethod)

            if operatorMethod == "wachspress":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "variational",
                    "config_stress_divergence_scheme": "variational",
                    "config_variational_basis": "wachspress"}}
            elif operatorMethod == "pwl":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "variational",
                    "config_stress_divergence_scheme": "variational",
                    "config_variational_basis": "pwl"}}
            elif operatorMethod == "weak":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "weak",
                    "config_stress_divergence_scheme": "weak"}}

            f90nml.patch("namelist.seaice.strain", nmlPatch,
                         "namelist.seaice.%s" % (operatorMethod))

            os.system("rm -rf namelist.seaice streams.seaice output_%s_%s" %
                      (gridType, operatorMethod))
            os.system("ln -s namelist.seaice.%s namelist.seaice" % (operatorMethod))
            os.system("ln -s streams.seaice.strain streams.seaice")
            os.system("%s ../../../seaice_model" %
                      (MPAS_SEAICE_TESTCASES_RUN_COMMAND))
            os.system("mv output output_%s_%s" % (gridType, operatorMethod))
def run_model():
    MPAS_SEAICE_TESTCASES_RUN_COMMAND = os.environ.get(
        'MPAS_SEAICE_TESTCASES_RUN_COMMAND')
    if MPAS_SEAICE_TESTCASES_RUN_COMMAND is None:
        MPAS_SEAICE_TESTCASES_RUN_COMMAND = ""

    gridSizes = [2562, 10242, 40962, 163842]
    operatorMethods = ["wachspress", "pwl", "weak", "wachspress_alt", "pwl_alt"]

    for operatorMethod in operatorMethods:
        print("Operator Method: ", operatorMethod)
        for gridSize in gridSizes:
            print("  Gridsize: ", gridSize)

            os.system("rm grid.nc ic.nc")
            os.system("ln -s grid.%i.nc grid.nc" % (gridSize))
            os.system("ln -s ic_%i.nc ic.nc" % (gridSize))

            if operatorMethod == "wachspress":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "variational",
                    "config_stress_divergence_scheme": "variational",
                    "config_variational_basis": "wachspress",
                    "config_variational_denominator_type": "original"}}
            elif operatorMethod == "pwl":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "variational",
                    "config_stress_divergence_scheme": "variational",
                    "config_variational_basis": "pwl",
                    "config_variational_denominator_type": "original"}}
            elif operatorMethod == "weak":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "weak",
                    "config_stress_divergence_scheme": "weak"}}
            elif operatorMethod == "wachspress_alt":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "variational",
                    "config_stress_divergence_scheme": "variational",
                    "config_variational_basis": "wachspress",
                    "config_variational_denominator_type": "alternate"}}
            elif operatorMethod == "pwl_alt":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "variational",
                    "config_stress_divergence_scheme": "variational",
                    "config_variational_basis": "pwl",
                    "config_variational_denominator_type": "alternate"}}

            f90nml.patch("namelist.seaice.stress_divergence", nmlPatch,
                         "namelist.seaice.%s.%i" % (operatorMethod, gridSize))

            os.system("rm -rf namelist.seaice streams.seaice output_%s_%i" %
                      (operatorMethod, gridSize))
            os.system("ln -s namelist.seaice.%s.%i namelist.seaice" %
                      (operatorMethod, gridSize))
            os.system("ln -s streams.seaice.stress_divergence streams.seaice")
            os.system("%s ../../../../seaice_model" %
                      (MPAS_SEAICE_TESTCASES_RUN_COMMAND))
            os.system("mv output output_%s_%i" % (operatorMethod, gridSize))
def run_model():
    operatorMethods = ["wachspress", "pwl", "weak"]
    #subcycleNumbers = [120, 240, 480, 960, 1920, 3840, 7680]
    subcycleNumbers = [120]

    for operatorMethod in operatorMethods:
        print("Operator Method: ", operatorMethod)
        for subcycleNumber in subcycleNumbers:
            print("  Subcycle number: ", subcycleNumber)

            if operatorMethod == "wachspress":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "variational",
                    "config_stress_divergence_scheme": "variational",
                    "config_variational_basis": "wachspress",
                    "config_elastic_subcycle_number": subcycleNumber}}
            elif operatorMethod == "pwl":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "variational",
                    "config_stress_divergence_scheme": "variational",
                    "config_variational_basis": "pwl",
                    "config_elastic_subcycle_number": subcycleNumber}}
            elif operatorMethod == "weak":
                nmlPatch = {"velocity_solver": {
                    "config_strain_scheme": "weak",
                    "config_stress_divergence_scheme": "weak",
                    "config_elastic_subcycle_number": subcycleNumber}}

            f90nml.patch("namelist.seaice.square", nmlPatch,
                         "namelist.seaice.%s.%i" % (operatorMethod, subcycleNumber))

            os.system("rm -rf namelist.seaice streams.seaice output_hex_%s_%i" %
                      (operatorMethod, subcycleNumber))
            os.system("ln -s namelist.seaice.%s.%i namelist.seaice" %
                      (operatorMethod, subcycleNumber))
            os.system("ln -s streams.seaice.square streams.seaice")
            os.system("../../../../../seaice_model")
            os.system("mv output output_hex_%s_%i" % (operatorMethod, subcycleNumber))
def writePatchNamelist(nmlbaseDict, nmlPatch, outputD, override=True):
    # input:
    #
    #   nmlbaseDict: the base namelist to be patched
    #   nmlPatch:    patch to apply to the namelist
    #   outputD:     output directory
    #
    # assumes f90nml has been imported as `nml`
    namelistNewV = os.path.join(outputD, "NAMELIST")
    if os.path.isfile(namelistNewV) and override:
        nml.patch(nmlbaseDict, nmlPatch, namelistNewV)
def run_and_compare(cases_dir, path_to_exes, is_patch=False):
    excluded_cases = ['501', '502']
    excluded_platforms = ['Darwin']
    precursor_sims = ['501']
    driver_sims = ['502']
    for case_path in sorted(cases_dir):
        case_id = case_path.stem
        if case_id in excluded_cases:
            if platform.system() in excluded_platforms:
                print(f'Skipping tests for case {case_id} on {excluded_platforms}')
                continue
        print(f'Running tests for example {case_id}')
        if is_patch:
            test_case_dir = PROJ_DIR / 'examples' / case_id
        else:
            test_case_dir = case_path
        outputs_case_dir = PROJ_DIR / 'tests' / 'outputs' / test_case_dir.name
        # Always start afresh.
        shutil.rmtree(outputs_case_dir, ignore_errors=True)
        model_output_dirs = []
        for path_to_exe in path_to_exes:
            # Create path to out folder
            model_output_dir = outputs_case_dir / path_to_exe.name
            shutil.copytree(test_case_dir, model_output_dir)
            if is_patch:
                # Apply test namelist patches to examples to reduce runtime
                nml = model_output_dir / f'namoptions.{case_id}'
                nml_patch = f90nml.read(case_path)
                nml_patched = model_output_dir / f'namoptions.{case_id}.patch'
                f90nml.patch(nml, nml_patch, nml_patched)
                namelist = nml_patched.name
            else:
                namelist = f'namoptions.{case_id}'
            # For driver sims we first need to copy in all files from the
            # precursor simulation.
            if case_id in driver_sims:
                for f_name in (model_output_dir.parents[1] / '501' /
                               model_output_dir.name).glob('*driver*'):
                    shutil.copy(f_name, model_output_dir.parents[1] / '502' /
                                model_output_dir.name)
            run_udales(path_to_exe, namelist, model_output_dir, model_output_dirs)
        # We do not compare precursor sims
        if case_id not in precursor_sims:
            # TODO: concatenate filedumps?
            compare_outputs.compare(
                model_output_dirs[0] / f'fielddump.000.{test_case_dir.name}.nc',
                model_output_dirs[1] / f'fielddump.000.{test_case_dir.name}.nc',
                model_output_dirs[0].parent)
def test_default_patch(self):
    patch_nml = f90nml.read('types_patch.nml')
    f90nml.patch('types.nml', patch_nml)
    test_nml = f90nml.read('types.nml~')
    try:
        self.assertEqual(test_nml, patch_nml)
    finally:
        os.remove('types.nml~')

    # The above behavior is only for paths, not files
    with open('types.nml') as nml_file:
        self.assertRaises(ValueError, f90nml.patch, nml_file, patch_nml)
def editnamelist(self, namelistfile, namelist, param, value):
    """Edit a parameter within one of the setup's namelists."""
    patch = {namelist: {param: value}}
    namelistpath = os.path.join(self.input, namelistfile)
    oldnamelistpath = namelistpath + '_temp'
    shutil.copy(namelistpath, oldnamelistpath)
    os.remove(namelistpath)
    f90nml.patch(oldnamelistpath, patch, namelistpath)
    os.remove(oldnamelistpath)
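# A minimal standalone sketch of the swap-and-patch pattern used in
# editnamelist above, outside the class context. The function name and file
# paths here are hypothetical.
import os
import shutil

import f90nml

def patch_in_place(namelist_path, group, param, value):
    # Stash the original beside the file, patch from the stash back onto the
    # original path, then drop the stash.
    backup = namelist_path + '_temp'
    shutil.copy(namelist_path, backup)
    os.remove(namelist_path)
    f90nml.patch(backup, {group: {param: value}}, namelist_path)
    os.remove(backup)

# patch_in_place('input/run.nml', 'run_nml', 'dt', 30.0)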
def test_patch_files(self):
    patch_nml = f90nml.read('types_patch.nml')
    with open('types.nml') as f_in:
        with open('tmp.nml', 'w') as f_out:
            f90nml.patch(f_in, patch_nml, f_out)
            self.assertFalse(f_in.closed)
            self.assertFalse(f_out.closed)
    try:
        test_nml = f90nml.read('tmp.nml')
        self.assertEqual(test_nml, patch_nml)
    finally:
        os.remove('tmp.nml')
def _write_file(self, outputfile, rawvals, do_patch=False, nml_file=None):
    patch = defaultdict(dict)
    for key, rawval in rawvals.items():
        if rawval is None:
            # omit if value is None
            continue
        if isinstance(rawval, numpy.ndarray):
            # necessary until f90nml supports numpy arrays
            rawval = list(rawval)
        patch[key[1]][key[0]] = rawval
    if do_patch:
        f90nml.patch(nml_file or self._nml_file, patch, outputfile)
    else:
        f90nml.write(patch, outputfile, force=True)
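# Self-contained sketch of the ndarray conversion detail above: f90nml
# serialises plain Python scalars and lists, so numpy arrays are converted to
# lists before patching. The group, variable, and file names are made up.
import numpy
import f90nml

raw = {"depth_levels": numpy.linspace(0.0, 100.0, 5)}
patch = {"grid_nml": {k: (list(v) if isinstance(v, numpy.ndarray) else v)
                      for k, v in raw.items()}}
f90nml.patch("base.nml", patch, "patched.nml")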
def filter_inp_file_f90nml(inp_file_in, inp_file_out, replace_with_these):
    """
    Replaces elements in *inp_file_in* and places the result in *inp_file_out*

    replace_with_these is a dict with entries of the form
    {'Name of parameter': *value to replace with*}

    This version works on namelist files using the f90nml package
    """
    import f90nml
    patch = patch_from_tupled_dict(replace_with_these)
    if patch:
        f90nml.patch(inp_file_in, patch, inp_file_out)
    else:
        copy(inp_file_in, inp_file_out)
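# Hypothetical calls illustrating the fall-through above: a non-empty dict is
# turned into a patch (via the project's patch_from_tupled_dict helper) and
# applied, while an empty dict degenerates to a straight file copy.
# filter_inp_file_f90nml('run.inp', 'run_patched.inp', {'nsteps': 1000})
# filter_inp_file_f90nml('run.inp', 'run_copy.inp', {})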
def update_namelist(self, namelist_update):
    """update values in a WRF namelist, save old namelist file as a backup

    The existing namelist file is copied to a new file with the current
    date appended to the file name (in format YYYY-MM-DD_hhmm)

    ARGS:
    namelist_update (dict): the new values to be placed in the namelist
        file. Should be formatted as {category: {item: value}}. example:
        update_namelist({'domains': {'time_step': 100}})
    """
    fname_bak = self.fname + pd.Timestamp.now().strftime(".%Y-%m-%d_%H%M")
    shutil.copy(self.fname, fname_bak)
    f90nml.patch(fname_bak, namelist_update, self.fname)
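# The same timestamped-backup pattern as update_namelist, sketched without the
# pandas dependency; the namelist file name below is hypothetical.
import shutil
from datetime import datetime

import f90nml

def patch_with_backup(fname, update):
    # Preserve the pre-patch file under a date-stamped name, then patch in place.
    backup = fname + datetime.now().strftime(".%Y-%m-%d_%H%M")
    shutil.copy(fname, backup)
    f90nml.patch(backup, update, fname)

# patch_with_backup('namelist.input', {'domains': {'time_step': 100}})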
def __init__(self, **options):
    self.stopping_conditions = StoppingConditions(self)
    self.itot = 0
    self.ktot = 0
    self.latitudes = None
    self.longitudes = None
    self.exp_name = 'TEST'
    CommonCode.__init__(self, OpenIFSInterface(**options), **options)
    # If workdir is given, assume inputfile and backupfile are relative to workdir.
    # TODO: should store the changed file names - they are used in cleanup_code
    inputfile = OpenIFS.inputfile
    backupfile = OpenIFS.backupfile
    if 'workdir' in options:
        self.set_workdir(options['workdir'])
        inputfile = os.path.join(options['workdir'], inputfile)
        backupfile = os.path.join(options['workdir'], backupfile)
    if not os.path.exists(inputfile):
        print('inputfile:' + inputfile)
        print('cwd:' + os.getcwd())
        raise Exception("File fort.4 not found. Creating an openIFS model "
                        "from scratch is not supported yet.")
    else:
        os.rename(inputfile, backupfile)
    self.params = f90nml.read(backupfile)
    self.patch = {"NAMPAR0": {"NPROC": options.get("number_of_workers", 1)}}
    restart_steps = options.get("restart_steps", None)
    if restart_steps:
        self.patch["NAMRES"] = {"NFRRES": restart_steps}
    f90nml.patch(backupfile, self.patch, inputfile)
    print('Done patching openIFS input file', inputfile)
def _write_segment_namrun_namelist(run_desc, namelist_namrun_patch, tmp_run_desc_dir):
    """
    :param dict run_desc: Run description dictionary.

    :param dict namelist_namrun_patch: f90nml patch for namrun namelist for
                                       the segment.

    :param tmp_run_desc_dir: Temporary directory where the namelists and run
                             description files for segments are stored.
    :type tmp_run_desc_dir: :py:class:`pathlib.Path`

    :return: File path and name of namelist section file containing namrun
             namelist for the segment.
    :rtype: :py:class:`pathlib.Path`
    """
    namelist_namrun = get_run_desc_value(
        run_desc, ("segmented run", "namelists", "namrun"), expand_path=True)
    f90nml.patch(namelist_namrun, namelist_namrun_patch,
                 tmp_run_desc_dir / namelist_namrun.name)
    return tmp_run_desc_dir / namelist_namrun.name
def patch_namelist(orig_nml_path: str, patch_nml_path: str, new_nml_path: str):
    """This function updates a larger original namelist with a file containing
    a smaller subset of changes and writes out a new namelist to a file.

    Args:
        orig_nml_path: Path to the namelist file to be updated
        patch_nml_path: Path to the file containing the namelist updates
        new_nml_path: Path to write the new namelist file with updates applied.

    Returns:
        None
    """
    # Read in namelist patch
    patch_nml = f90nml.read(nml_path=patch_nml_path)
    # Write new namelist to file
    f90nml.patch(nml_path=orig_nml_path, nml_patch=patch_nml, out_path=new_nml_path)
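# Usage sketch for patch_namelist: keep a small "delta" namelist and stamp it
# onto a full template. File names and the group/variable are hypothetical.
#
#   delta.nml:
#       &run_nml
#           dt = 30.0
#       /
#
# patch_namelist('template.nml', 'delta.nml', 'run.nml')
# f90nml.read('run.nml')['run_nml']['dt']  # -> 30.0; everything else unchanged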
def _patch_namelist(namelist_path, patch):
    """
    :param :py:class:`pathlib.Path` namelist_path:
    :param dict patch:
    """
    # f90nml insists on writing the patched namelist to a file,
    # so we use an ephemeral temporary file
    with tempfile.TemporaryFile("wt") as tmp_patched_namelist:
        nml = f90nml.patch(namelist_path, patch, tmp_patched_namelist)
    with namelist_path.open("wt") as patched_namelist:
        nml.write(patched_namelist)
    logger.debug(f"patched namelist: {namelist_path}")
def _edit_namelist_time(sftp_client, host_name, prev_namelist_info, run_date,
                        run_days, config):
    """
    :param :py:class:`paramiko.sftp_client.SFTPClient` sftp_client:
    :param str host_name:
    :param :py:class:`types.SimpleNamespace` prev_namelist_info:
    :param :py:class:`arrow.Arrow` run_date:
    :param int run_days:
    :param :py:class:`nemo_nowcast.Config` config:
    """
    timesteps_per_day = 24 * 60 * 60 / prev_namelist_info.rdt
    itend = prev_namelist_info.itend + run_days * timesteps_per_day
    nn_stocklist = [0] * 10
    if run_days < 28:
        nn_stocklist[0] = int(itend)
    else:
        nn_stocklist[0:3] = [
            int(prev_namelist_info.itend + timesteps_per_day * 10),
            int(prev_namelist_info.itend + timesteps_per_day * 20),
            int(itend),
        ]
    patch = {
        "namrun": {
            "nn_it000": prev_namelist_info.itend + 1,
            "nn_itend": int(itend),
            "nn_date0": int(run_date.format("YYYYMMDD")),
            "nn_stocklist": nn_stocklist,
        }
    }
    run_prep_dir = Path(config["run"]["hindcast hosts"][host_name]["run prep dir"])
    namelist_time_tmpl = f"{run_prep_dir}/namelist.time"
    sftp_client.get(namelist_time_tmpl, "/tmp/hindcast.namelist.time")
    logger.debug(f"downloaded {host_name}:{run_prep_dir}/namelist.time")
    f90nml.patch("/tmp/hindcast.namelist.time", patch,
                 "/tmp/patched_hindcast.namelist.time")
    logger.debug("patched namelist.time")
    sftp_client.put("/tmp/patched_hindcast.namelist.time",
                    f"{run_prep_dir}/namelist.time")
    logger.debug(f"uploaded new {host_name}:{run_prep_dir}/namelist.time")
def _edit_namelist_times(sftp_client, host_name, prev_run_namelists_info,
                         run_date, config):
    """
    :param :py:class:`paramiko.sftp_client.SFTPClient` sftp_client:
    :param str host_name:
    :param :py:class:`types.SimpleNamespace` prev_run_namelists_info:
    :param :py:class:`arrow.Arrow` run_date:
    :param :py:class:`nemo_nowcast.Config` config:
    """
    itend = (prev_run_namelists_info.itend +
             24 * 60 * 60 / prev_run_namelists_info.rdt)
    patches = {
        "namelist.time": {
            "namrun": {
                "nn_it000": prev_run_namelists_info.itend + 1,
                "nn_itend": int(itend),
                "nn_date0": int(run_date.format("YYYYMMDD")),
            }
        },
        "namelist.time.BS": {
            "namrun": {"nn_date0": int(run_date.format("YYYYMMDD"))}
        },
    }
    run_prep_dir = Path(config["run"]["enabled hosts"][host_name]["run prep dir"])
    for namelist in patches:
        sftp_client.get(os.fspath(run_prep_dir / namelist),
                        f"/tmp/nowcast-agrif.{namelist}")
        logger.debug(f"downloaded {host_name}:{run_prep_dir/namelist}")
        f90nml.patch(
            f"/tmp/nowcast-agrif.{namelist}",
            patches[namelist],
            f"/tmp/patched_nowcast-agrif.{namelist}",
        )
        logger.debug(f"patched {namelist}")
        sftp_client.put(f"/tmp/patched_nowcast-agrif.{namelist}",
                        os.fspath(run_prep_dir / namelist))
        logger.debug(f"uploaded new {host_name}:{run_prep_dir/namelist}")
def run_teb(path_to_case_dir: Path, path_to_exe: Path, patch_nml) -> None:
    # Sanity check
    if not list(path_to_case_dir.glob('*.nml')):
        raise RuntimeError(f'Namelist not found in {path_to_case_dir}')
    if not (path_to_case_dir / 'input').exists():
        raise RuntimeError(f'Input folder not found in {path_to_case_dir}')
    if not path_to_exe.exists():
        raise RuntimeError(f'{path_to_exe} not found.')

    if patch_nml:
        path_to_namelist = path_to_case_dir / 'input.nml'
        path_to_unpatched_namelist = path_to_case_dir / 'input.nml_unpatched'
        shutil.move(path_to_namelist, path_to_unpatched_namelist)
        f90nml.patch(str(path_to_unpatched_namelist), patch_nml,
                     str(path_to_namelist))

    # TEB needs the output folder to be present.
    Path(path_to_case_dir / 'output').mkdir(parents=True, exist_ok=True)
    case_name = path_to_case_dir.stem
    with open(path_to_case_dir / (case_name + '.log'), 'w') as f:
        print('Running case: ' + case_name)
        subprocess.check_call([path_to_exe], cwd=path_to_case_dir, stdout=f)
def _patch_namelist(path: str, patch: dict, delete_vars: List[str] = None) -> None:
    '''Patch an existing namelist file, retaining any formatting and comments.'''
    # f90nml does not create a patch file if the patch is empty
    if not patch:
        return
    assert os.path.exists(path), path
    patch_path = path + '.tmp'
    # TODO set indentation to 0 (patch APIs don't support it currently,
    # see https://github.com/marshallward/f90nml/issues/79)
    f90nml.patch(path, patch, patch_path)
    assert os.path.exists(patch_path), patch_path
    if delete_vars:
        # work-around until f90nml.patch supports deletion,
        # see https://github.com/marshallward/f90nml/issues/77
        with open(patch_path, 'r') as fp:
            lines = fp.readlines()
        with open(patch_path, 'w') as fp:
            for line in lines:
                if any(var_name in line for var_name in delete_vars):
                    continue
                fp.write(line)
    os.remove(path)
    os.rename(patch_path, path)
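# Hypothetical call illustrating _patch_namelist, including the deletion
# work-around. Note that delete_vars matches substrings anywhere on a line, so
# a short entry can also remove unrelated lines that happen to contain it.
# _patch_namelist('namelist.wps', {'share': {'max_dom': 2}},
#                 delete_vars=['opt_output_from_geogrid_path'])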
def write_namelist_parameters(self, outputfile, do_patch=False, nml_file=None):
    patch = defaultdict(dict)
    for p in self._namelist_parameters.values():
        name = p["name"]
        group_name = p["group_name"]
        group = patch[group_name]
        short = p["short"]
        parameter_set_name = p.get("set_name", "parameters_" + group_name)
        parameter_set = getattr(self, parameter_set_name)
        if getattr(parameter_set, name) is None:
            # omit if value is None
            continue
        if is_quantity(p["default"]):
            group[short] = to_quantity(
                getattr(parameter_set, name)).value_in(p["default"].unit)
        else:
            group[short] = getattr(parameter_set, name)
    if do_patch:
        f90nml.patch(nml_file or self._nml_file, patch, outputfile)
    else:
        f90nml.write(patch, outputfile, force=True)
        },
        'parm04': {
            'delZ': [round(dz, 3) for i in range(nz)],
        }
    },
}

# Copy namelists
for filename in os.listdir(nametempldir):
    if filename not in patches.keys():
        shutil.copy('{}/{}'.format(nametempldir, filename),
                    '{}/{}'.format(setupdirs['inputdir'], filename))

# Patch namelists
for namefile in patches.keys():
    f90nml.patch('{}/{}'.format(nametempldir, namefile), patches[namefile],
                 out_path='{}/{}'.format(setupdirs['inputdir'], namefile))

# Read namelists (from the patched copies in the input directory)
namelists = {}
for filename in os.listdir(setupdirs['inputdir']):
    namelists[filename] = f90nml.read('{}/{}'.format(setupdirs['inputdir'], filename))

viscAz = gentools.sift_nested_dict('viscAz', namelists)
print('Vertical viscosity is {}'.format(viscAz))

# -----------------------------------------------------------------------------
# Save binary input files
gentools.convert_and_save(
    {'topo': topo,
     'obc_U': obc_U,
base_nml_filename = experiment_config_dir + '/' + base_experiment_config
base_nml = f90nml.read(base_nml_filename + '.nml')
if base_nml['experiment_config']:
    case_name = base_nml['experiment_config']['case_name']
    output_dir = base_nml['experiment_config']['output_dir']
    output_file = base_nml['experiment_config']['output_file']
    for i in range(num_files):
        patch_nml = {
            'experiment_config': {
                'case_name': case_name + '_ens' + str(i).zfill(2),
                'output_dir': output_dir + '/ens',
                'output_file': output_file + '_ens' + str(i).zfill(2)
            }
        }
        f90nml.patch(base_nml_filename + '.nml', patch_nml,
                     base_nml_filename + '_ens' + str(i).zfill(2) + '.nml')
        ens_experiment_configs.append(
            base_experiment_config + '_ens' + str(i).zfill(2))
        #q.put_nowait((base_experiment_config + '_ens' + str(i).zfill(2),))
        # copy over the same model_config
        shutil.copy(
            model_config_dir + '/' + base_experiment_config + '.nml',
            model_config_dir + '/' + base_experiment_config +
            '_ens' + str(i).zfill(2) + '.nml')
else:
    print('Cannot make an ensemble out of a single timestep case config file')

os.chdir('bin')

# start threads
def generate_input(self, input_template):
    with open(input_template) as nml_file:
        namelist = f90nml.read(nml_file)
    namelist.float_format = "1.2E"

    patch_namelist = f90nml.namelist.Namelist()
    patch_namelist['theta_grid_knobs'] = {'equilibrium_option': 'eik'}

    # Decimal places.
    dp = 3
    # Scientific notation format string
    fs = '{{:1.{}E}}'.format(dp)

    patch_namelist['theta_grid_parameters'] = {
        'rhoc': float(fs.format(round(self.zerod['minrad'].data[0], dp))),
        'rmaj': float(fs.format(round(self.zerod['majrad'].data[0], dp))),
        'r_geo': float(fs.format(round(self.zerod['rgeo'].data[0], dp))),
        'qinp': float(fs.format(round(self.zerod['safety'].data[0], dp))),
        'shat': float(fs.format(round(self.zerod['shat'].data[0], dp))),
        'shift': float(fs.format(round(self.zerod['shift'].data[0], dp))),
        'akappa': float(fs.format(round(self.zerod['elo'].data[0], dp))),
        'akappri': float(fs.format(round(self.zerod['delodr'].data[0], dp))),
        'tri': float(fs.format(round(self.zerod['tri'].data[0], dp))),
        'tripri': float(fs.format(round(self.zerod['dtridr'].data[0], dp)))
    }
    patch_namelist['parameters'] = {
        'zeff': float(fs.format(round(self.zerod['zeff'].data[0], dp))),
        'beta': float(fs.format(self.zerod['beta_ref'].data[0]))
    }
    patch_namelist['theta_grid_eik_knobs'] = {
        'iflux': 0,
        'irho': 2,
        'bishop': 4,
        's_hat_input': float(fs.format(round(self.zerod['shat'].data[0], dp))),
        'beta_prime_input': float(fs.format(round(self.zerod['dpdr_tot'].data[0], dp)))
    }
    patch_namelist['species_knobs'] = {
        'nspec': int(1 + self.zerod['num_hyd_spec'].data[0] +
                     self.zerod['num_imps'].data[0])
    }

    species_counter = 1
    for spec in ['h', 'd', 't', 'e', 'imp1', 'imp2', 'imp3']:
        dens = 'dens_{}'.format(spec)
        if dens in self.zerod:
            if spec == 'e':
                specType = 'electron'
                tprim = 'tprim_e'
                temp = 'temp_e'
            else:
                specType = 'ion'
                tprim = 'tprim_i'
                temp = 'temp_i'
            patch_namelist['species_parameters_{}'.format(species_counter)] = {
                'z': float(fs.format(round(self.zerod['z_{}'.format(spec)].data[0], dp))),
                'mass': float(fs.format(self.zerod['mass_{}'.format(spec)].data[0])),
                'dens': float(fs.format(round(self.zerod[dens].data[0], dp))),
                'fprim': float(fs.format(round(self.zerod['fprim_{}'.format(spec)].data[0], dp))),
                'temp': float(fs.format(round(self.zerod[temp].data[0], dp))),
                'tprim': float(fs.format(round(self.zerod[tprim].data[0], dp))),
                'vnewk': float(fs.format(self.zerod['nu_{}'.format(spec)].data[0])),
                'type': specType
            }
            species_counter += 1

    patch_namelist['dist_fn_knobs'] = {
        'g_exb': float(fs.format(round(self.zerod['g_exb'].data[0], dp))),
        'mach': float(fs.format(round(self.zerod['tor_angv'].data[0], dp)))
    }
    patch_namelist.float_format = "1.2E"

    # Save patch.
    newInputFile = '{}_{}_{}_{}_{}.in'.format(self.shot, self.uid, self.seq,
                                              self.t_choice, self.r_choice)
    f90nml.patch(input_template, patch_namelist, newInputFile)
    print('Successfully patched input file: {}.'.format(newInputFile))
def run_model():
    MPAS_SEAICE_TESTCASES_RUN_COMMAND = os.environ.get(
        'MPAS_SEAICE_TESTCASES_RUN_COMMAND')
    if MPAS_SEAICE_TESTCASES_RUN_COMMAND is None:
        MPAS_SEAICE_TESTCASES_RUN_COMMAND = ""

    operatorMethods = ["wachspress", "pwl", "weak"]
    #operatorMethods = ["wachspress", "pwl"]

    gridTypes = ["hex", "quad"]
    #gridTypes = ["hex"]

    grids = {"hex":  ["0082x0094", "0164x0188", "0328x0376", "0656x0752"],
             "quad": ["0080x0080", "0160x0160", "0320x0320", "0640x0640"]}
    #grids = {"hex":  ["0082x0094"],
    #         "quad": ["0080x0080"]}

    for gridType in gridTypes:
        print("Grid type: ", gridType)
        for operatorMethod in operatorMethods:
            print("  Operator Method: ", operatorMethod)
            for grid in grids[gridType]:
                print("    Grid: ", grid)

                os.system("rm grid.nc")
                os.system("rm ic.nc")
                os.system("ln -s grid_%s_%s.nc grid.nc" % (gridType, grid))
                os.system("ln -s ic_%s_%s.nc ic.nc" % (gridType, grid))

                if operatorMethod == "wachspress":
                    nmlPatch = {"velocity_solver": {
                        "config_strain_scheme": "variational",
                        "config_stress_divergence_scheme": "variational",
                        "config_variational_basis": "wachspress",
                        "config_variational_denominator_type": "original"}}
                elif operatorMethod == "pwl":
                    nmlPatch = {"velocity_solver": {
                        "config_strain_scheme": "variational",
                        "config_stress_divergence_scheme": "variational",
                        "config_variational_basis": "pwl",
                        "config_variational_denominator_type": "original"}}
                elif operatorMethod == "weak":
                    nmlPatch = {"velocity_solver": {
                        "config_strain_scheme": "weak",
                        "config_stress_divergence_scheme": "weak"}}

                f90nml.patch("namelist.seaice.stress_divergence", nmlPatch,
                             "namelist.seaice.%s" % (operatorMethod))

                os.system("rm -rf namelist.seaice streams.seaice output_%s_%s_%s" %
                          (gridType, operatorMethod, grid))
                os.system("ln -s namelist.seaice.%s namelist.seaice" %
                          (operatorMethod))
                os.system("ln -s streams.seaice.stress_divergence streams.seaice")
                os.system("%s ../../../../../seaice_model" %
                          (MPAS_SEAICE_TESTCASES_RUN_COMMAND))
                os.system("mv output output_%s_%s_%s" %
                          (gridType, operatorMethod, grid))
def test_patch_null(self):
    try:
        f90nml.patch('types.nml', {}, 'tmp.nml')
        self.assert_file_equal('types.nml', 'tmp.nml')
    finally:
        os.remove('tmp.nml')
def setup_rundir(self):
    """Set up run directory for this experiment."""

    # Parse case configuration namelist and extract
    # - output directory
    # - surface_flux_spec
    logging.info('Parsing case configuration namelist {0}'.format(self._namelist))
    case_nml = f90nml.read(self._namelist)
    # Look for the output_dir variable in the case configuration namelist and use it if
    # it exists; if it doesn't, create a default output directory name (from the case
    # and suite names) and create a namelist patch.
    try:
        output_dir = case_nml['case_config']['output_dir']
        custom_output_dir = True
    except KeyError:
        # If using the default namelist, no need to include it in the output directory
        # name; if not, the custom namelist must appear in the output directory name in
        # case multiple experiments run with the same case and suite but different
        # namelists.
        if self._physics_namelist == default_physics_namelists.get(self._suite):
            output_dir = 'output_' + self._case + '_' + self._suite
        else:
            output_dir = ('output_' + self._case + '_' + self._suite + '_' +
                          os.path.splitext(self._physics_namelist)[0])
        output_dir_patch_nml = {'case_config': {'output_dir': output_dir}}
        custom_output_dir = False

    # Check whether surface fluxes are specified in the case configuration file
    # (default is False).
    try:
        surface_flux_spec = case_nml['case_config']['sfc_flux_spec']
    except KeyError:
        surface_flux_spec = False

    # If surface fluxes are specified for this case, use the SDF modified to use them.
    if surface_flux_spec:
        logging.info('Specified surface fluxes are used for case {0}. '
                     'Switching to SDF {1} from {2}'.format(
                         self._case,
                         'suite_' + self._suite + '_prescribed_surface' + '.xml',
                         'suite_' + self._suite + '.xml'))
        self._suite = self._suite + '_prescribed_surface'

    # Create physics_config namelist for experiment configuration file.
    physics_config = {"physics_suite": self._suite,
                      "physics_nml": self._physics_namelist}
    physics_config_dict = {"physics_config": physics_config}
    physics_config_nml = f90nml.namelist.Namelist(physics_config_dict)

    # Create STANDARD_EXPERIMENT_NAMELIST in the run directory with the case
    # configuration and physics configuration namelists.
    logging.info('Creating experiment configuration namelist {0} in the run directory '
                 'from {1} using {2} and {3}'.format(
                     STANDARD_EXPERIMENT_NAMELIST, self._namelist, self._suite,
                     self._physics_namelist))
    with open(STANDARD_EXPERIMENT_NAMELIST, "w+") as nml_file:
        case_nml.write(nml_file)
    with open(STANDARD_EXPERIMENT_NAMELIST, "a") as nml_file:
        physics_config_nml.write(nml_file)

    # If using the default output dir name created in this script, patch the experiment
    # namelist with the new output_dir variable.
    if not custom_output_dir:
        # GJF TODO: this implementation is clunky; newer versions of f90nml can handle
        # this better, but this works with v0.19 so no need to require a newer version
        f90nml.patch(STANDARD_EXPERIMENT_NAMELIST, output_dir_patch_nml, 'temp.nml')
        cmd = "mv {0} {1}".format('temp.nml', STANDARD_EXPERIMENT_NAMELIST)
        execute(cmd)

    # Link physics namelist to run directory with its original name.
    logging.info('Linking physics namelist {0} to run directory'.format(
        self._physics_namelist))
    if os.path.isfile(self._physics_namelist):
        os.remove(self._physics_namelist)
    if not os.path.isfile(os.path.join(PHYSICS_NAMELIST_DIR, self._physics_namelist)):
        message = 'Physics namelist {0} not found in directory {1}'.format(
            self._physics_namelist, PHYSICS_NAMELIST_DIR)
        logging.critical(message)
        raise Exception(message)
    cmd = "ln -sf {0} {1}".format(
        os.path.join(PHYSICS_NAMELIST_DIR, self._physics_namelist),
        self._physics_namelist)
    execute(cmd)

    # Link physics SDF to run directory.
    physics_suite = 'suite_' + self._suite + '.xml'
    logging.info('Linking physics suite {0} to run directory'.format(physics_suite))
    if os.path.isfile(physics_suite):
        os.remove(physics_suite)
    if not os.path.isfile(os.path.join(PHYSICS_SUITE_DIR, physics_suite)):
        message = 'Physics suite {0} not found in directory {1}'.format(
            physics_suite, PHYSICS_SUITE_DIR)
        logging.critical(message)
        raise Exception(message)
    cmd = "ln -sf {0} {1}".format(
        os.path.join(PHYSICS_SUITE_DIR, physics_suite), physics_suite)
    execute(cmd)

    # Parse physics namelist and extract
    # - oz_phys
    # - oz_phys_2015
    logging.info('Parsing physics namelist {0}'.format(self._physics_namelist))
    nml = f90nml.read(self._physics_namelist)
    # oz_phys
    try:
        oz_phys = nml['gfs_physics_nml']['oz_phys']
    except KeyError:
        oz_phys = DEFAULT_OZ_PHYS
    # oz_phys_2015
    try:
        oz_phys_2015 = nml['gfs_physics_nml']['oz_phys_2015']
    except KeyError:
        oz_phys_2015 = DEFAULT_OZ_PHYS_2015
    # Make sure that only one of the two ozone physics options is activated.
    if oz_phys_2015 and oz_phys:
        message = ('Logic error, both oz_phys and oz_phys_2015 are set to true '
                   'in the physics namelist')
        logging.critical(message)
        raise Exception(message)

    # Link input data for oz_phys or oz_phys_2015.
    if os.path.exists(OZ_PHYS_LINK):
        os.remove(OZ_PHYS_LINK)
    if oz_phys:
        logging.info('Linking input data for oz_phys')
        cmd = 'ln -sf {0} {1}'.format(OZ_PHYS_TARGET, OZ_PHYS_LINK)
        execute(cmd)
    elif oz_phys_2015:
        logging.info('Linking input data for oz_phys_2015')
        cmd = 'ln -sf {0} {1}'.format(OZ_PHYS_2015_TARGET, OZ_PHYS_LINK)
        execute(cmd)

    # Create output directory (delete existing directory).
    logging.info('Creating output directory {0} in run directory'.format(output_dir))
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)

    # Write experiment configuration file to output directory.
    logging.info('Writing experiment configuration {0}.nml to output directory'.format(
        self._name))
    cmd = 'cp {0} {1}'.format(STANDARD_EXPERIMENT_NAMELIST,
                              os.path.join(output_dir, self._name + '.nml'))
    execute(cmd)
def ensemble(yamlfile='ensemble.yaml', test=False):
    """
    Create and run an ensemble by varying only one parameter at a time.
    """
    # Alternatively, could loop over all values of all parameters using
    # `itertools.product`; see
    # https://stackoverflow.com/questions/1280667/in-python-is-there-an-easier-way-to-write-6-nested-for-loops
    indata = yaml.load(open(yamlfile, 'r'), Loader=yaml.SafeLoader)
    template = indata['template']
    templatepath = os.path.join(os.getcwd(), template)
    templaterepo = git.Repo(templatepath)
    startfrom = str(indata['startfrom']).strip().lower().zfill(3)
    ensemble = []  # paths to ensemble members
    for fname, nmls in indata['namelists'].items():
        for group, names in nmls.items():
            for name, values in names.items():
                turningangle = [fname, group, name] == [
                    'ice/cice_in.nml', 'dynamics_nml', 'turning_angle']
                for v in values:
                    exppath = os.path.join(os.getcwd(),
                                           '_'.join([template, name, str(v)]))
                    relexppath = os.path.relpath(exppath, os.getcwd())
                    expname = os.path.basename(relexppath)

                    if os.path.exists(exppath):
                        print('\n -- not creating', relexppath, '- already exists')
                        ensemble.append(exppath)
                        continue

                    # First check whether this set of parameters differs from template.
                    with open(os.path.join(templatepath, fname)) as template_nml_file:
                        nml = f90nml.read(template_nml_file)
                        if turningangle:
                            cosw = np.cos(v * np.pi / 180.)
                            sinw = np.sin(v * np.pi / 180.)
                            skip = nml[group]['cosw'] == cosw \
                                and nml[group]['sinw'] == sinw
                        else:
                            skip = nml[group][name] == v
                    if skip:
                        print('\n -- not creating', relexppath,
                              '- parameters are identical to', template)
                        continue

                    print('\ncreating', relexppath)

                    # Clone template, fix up git remotes, set up new branch.
                    exprepo = templaterepo.clone(exppath)
                    exprepo.remotes.origin.rename('template')
                    exprepo.create_remote('origin', templaterepo.remotes.origin.url)
                    # TODO: first checkout commit corresponding to restart?
                    exprepo.git.checkout('HEAD', b=expname)  # switch to a new branch

                    # Perturb parameters.
                    fpath = os.path.join(exppath, fname)
                    if turningangle:
                        f90nml.patch(fpath, {group: {'cosw': cosw}}, fpath + '_tmp2')
                        f90nml.patch(fpath + '_tmp2', {group: {'sinw': sinw}},
                                     fpath + '_tmp')
                        os.remove(fpath + '_tmp2')
                    else:  # general case
                        f90nml.patch(fpath, {group: {name: v}}, fpath + '_tmp')
                    os.rename(fpath + '_tmp', fpath)

                    # Additional check in case of match after roundoff.
                    if not exprepo.is_dirty():
                        print(' *** deleting', relexppath,
                              '- parameters are identical to', template)
                        shutil.rmtree(exppath)
                        continue

                    # Set SYNCDIR in sync_data.sh.
                    sdpath = os.path.join(exppath, 'sync_data.sh')
                    with open(sdpath + '_tmp', 'w') as wf:
                        with open(sdpath, 'r') as rf:
                            for line in rf:
                                if line.startswith('SYNCDIR='):
                                    syncbase = os.path.dirname(line[len('SYNCDIR='):])
                                    syncdir = os.path.join(syncbase, expname)
                                    wf.write('SYNCDIR=' + syncdir + '\n')
                                else:
                                    wf.write(line)
                    os.rename(sdpath + '_tmp', sdpath)

                    if os.path.exists(syncdir):
                        print(' *** deleting', relexppath,
                              '- SYNCDIR', syncdir, 'already exists')
                        shutil.rmtree(exppath)
                        continue

                    if startfrom != 'rest':
                        # Create archive symlink.
                        if not test:
                            subprocess.run('cd ' + exppath +
                                           ' && payu sweep && payu setup',
                                           check=False, shell=True)
                            workpath = os.path.realpath(os.path.join(exppath, 'work'))
                            subprocess.run('cd ' + exppath + ' && payu sweep',
                                           check=True, shell=True)
                        else:
                            # Simulate effect of payu setup (for testing without payu).
                            workpath = os.path.realpath(
                                os.path.join('test', 'work', expname))
                            os.makedirs(workpath)
                            os.symlink(workpath, os.path.join(exppath, 'work'))
                            archivepath = workpath.replace('/work/', '/archive/')
                            os.makedirs(archivepath)
                            workpath = os.path.realpath(os.path.join(exppath, 'work'))
                            os.remove(os.path.join(exppath, 'work'))
                            shutil.rmtree(workpath)
                            # Also make template restart symlink if it doesn't exist.
                            if template == 'test/1deg_jra55_iaf':  # e.g. testing fresh clone
                                templatearchive = os.path.join(templatepath, 'archive')
                                if not os.path.exists(templatearchive):
                                    os.symlink(
                                        archivepath.replace(
                                            expname, os.path.basename(template)),
                                        templatearchive)
                        # payu setup creates archive dir but not symlink,
                        # so infer archive path from work dest and link to it.
                        archivepath = workpath.replace('/work/', '/archive/')
                        if glob.glob(os.path.join(archivepath, 'output*')) + \
                           glob.glob(os.path.join(archivepath, 'restart*')):
                            print(' *** deleting', relexppath, '- archive', archivepath,
                                  'already contains restarts and/or outputs')
                            shutil.rmtree(exppath)
                            continue
                        os.symlink(archivepath, os.path.join(exppath, 'archive'))

                        # Symlink restart initial conditions.
                        d = os.path.join('archive', 'restart' + startfrom)
                        restartpath = os.path.realpath(os.path.join(template, d))
                        os.symlink(restartpath, os.path.join(exppath, d))

                        # Copy template/output[startfrom]/ice/cice_in.nml.
                        d = os.path.join('archive', 'output' + startfrom, 'ice')
                        os.makedirs(os.path.join(exppath, d))
                        shutil.copy(os.path.join(template, d, 'cice_in.nml'),
                                    os.path.join(exppath, d))

                    # Set jobname in config.yaml to reflect experiment.
                    # Don't use the yaml package as it doesn't preserve comments.
                    configpath = os.path.join(exppath, 'config.yaml')
                    with open(configpath + '_tmp', 'w') as wf:
                        with open(configpath, 'r') as rf:
                            for line in rf:
                                if line.startswith('jobname:'):
                                    wf.write('jobname: ' +
                                             '_'.join([name, str(v)]) + '\n')
                                else:
                                    wf.write(line)
                    os.rename(configpath + '_tmp', configpath)

                    # Update metadata.yaml.
                    metadata = yaml.load(
                        open(os.path.join(exppath, 'metadata.yaml'), 'r'),
                        Loader=yaml.SafeLoader)
                    desc = metadata['description']
                    desc += ('\nNOTE: this is a perturbation experiment, but the '
                             'description above is for the control run.')
                    desc += ('\nThis perturbation experiment is based on the '
                             'control run ' + templatepath)
                    if startfrom == 'rest':
                        desc += '\nbut with condition of rest'
                    else:
                        desc += '\nbut with initial condition ' + restartpath
                    if turningangle:
                        desc += '\nand ' + ' -> '.join([fname, group, 'cosw and sinw']) + \
                            ' changed to give a turning angle of ' + str(v) + ' degrees.'
                    else:
                        desc += '\nand ' + ' -> '.join([fname, group, name]) + \
                            ' changed to ' + str(v)
                    metadata['description'] = LiteralString(desc)
                    metadata['notes'] = LiteralString(metadata['notes'])
                    metadata['keywords'] += ['perturbation', name]
                    if turningangle:
                        metadata['keywords'] += ['cosw', 'sinw']
                    with open(os.path.join(exppath, 'metadata.yaml'), 'w') as f:
                        yaml.dump(metadata, f, default_flow_style=False,
                                  sort_keys=False)

                    # Remove run_summary_*.csv.
                    for f in glob.glob(os.path.join(exppath, 'run_summary_*.csv')):
                        exprepo.git.rm(os.path.basename(f))

                    # Commit.
                    exprepo.git.commit(a=True, m='set up ' + expname)

                    ensemble.append(exppath)

    # Count existing runs and do additional runs if needed.
    if indata['nruns'] > 0:
        for exppath in ensemble:
            doneruns = len(glob.glob(os.path.join(
                exppath, 'archive', 'output[0-9][0-9][0-9]*'))) - 1
            newruns = indata['nruns'] - doneruns
            if newruns > 0:
                # cmd = 'cd ' + exppath + ' && payu sweep && payu run -n ' + str(newruns)
                cmd = 'cd ' + exppath + ' && payu run -n ' + str(newruns)
                if test:
                    cmd = '# ' + cmd
                print('\n' + cmd)
                subprocess.run(cmd, check=False, shell=True)
            else:
                print('\n --', exppath, 'has already completed', doneruns, 'runs')
    print()