Example #1
import f90nml


def ascii2nml(input_data):
    # Wrap a plain dict of namelist groups in an f90nml.Namelist
    output_data = f90nml.Namelist(input_data)
    return output_data
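A minimal usage sketch for the helper above; the group name, values, and output filename are hypothetical:

nml = ascii2nml({'config_nml': {'steps': 864, 'layout': [8, 16]}})
nml.write('config.nml', force=True)  # force=True overwrites an existing file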
Example #2
    def set_zero_config(self):
        """Set config such that radiative forcing and temperature output will be zero

        This method is intended as a convenience only; it does not handle everything in
        an obvious way. Adjusting the parameter settings still requires great care and
        may behave unexpectedly.
        """
        # zero_emissions is imported from scenarios module
        # TODO: set up MAGICC6 so it puts the extra variables in the right place
        # and hence the warning about ignoring some data disappears
        zero_emissions.write(join(self.run_dir, self._scen_file_name),
                             self.version)

        time = zero_emissions.filter(variable="Emissions|CH4",
                                     region="World")["time"].values
        no_timesteps = len(time)
        # The value doesn't actually matter, as calculations are done from
        # differences, but choose a sensible value nonetheless
        co2_conc_pi = 722
        co2_conc = co2_conc_pi * np.ones(no_timesteps)
        co2_conc_df = pd.DataFrame({
            "time": time,
            "scenario": "idealised",
            "model": "unspecified",
            "climate_model": "unspecified",
            "variable": "Atmospheric Concentrations|CO2",
            "unit": "ppm",
            "todo": "SET",
            "region": "World",
            "value": co2_conc,
        })
        co2_conc_writer = MAGICCData(co2_conc_df)
        co2_conc_filename = "HIST_CONSTANT_CO2_CONC.IN"
        co2_conc_writer.metadata = {
            "header": "Constant pre-industrial CO2 concentrations"
        }
        co2_conc_writer.write(join(self.run_dir, co2_conc_filename),
                              self.version)

        ch4_conc_pi = 722
        ch4_conc = ch4_conc_pi * np.ones(no_timesteps)
        ch4_conc_df = pd.DataFrame({
            "time": time,
            "scenario": "idealised",
            "model": "unspecified",
            "climate_model": "unspecified",
            "variable": "Atmospheric Concentrations|CH4",
            "unit": "ppb",
            "todo": "SET",
            "region": "World",
            "value": ch4_conc,
        })
        ch4_conc_writer = MAGICCData(ch4_conc_df)
        ch4_conc_filename = "HIST_CONSTANT_CH4_CONC.IN"
        ch4_conc_writer.metadata = {
            "header": "Constant pre-industrial CH4 concentrations"
        }
        ch4_conc_writer.write(join(self.run_dir, ch4_conc_filename),
                              self.version)

        fgas_conc_pi = 0
        fgas_conc = fgas_conc_pi * np.ones(no_timesteps)

        varname = "FGAS_CONC"
        fgas_conc_df = pd.DataFrame({
            "time": time,
            "scenario": "idealised",
            "model": "unspecified",
            "climate_model": "unspecified",
            "variable": varname,
            "unit": "ppt",
            "todo": "SET",
            "region": "World",
            "value": fgas_conc,
        })
        fgas_conc_writer = MAGICCData(fgas_conc_df)
        fgas_conc_filename = "HIST_ZERO_{}.IN".format(varname)
        fgas_conc_writer.metadata = {"header": "Zero concentrations"}
        fgas_conc_writer.write(join(self.run_dir, fgas_conc_filename),
                               self.version)

        def_config = self.default_config
        tmp_nml = f90nml.Namelist({"nml_allcfgs": {"fgas_files_conc": 1}})
        fgas_files_conc_flag = list(
            self._fix_legacy_keys(tmp_nml,
                                  conflict="ignore")["nml_allcfgs"].keys())[0]
        fgas_conc_files = [fgas_conc_filename] * len(
            def_config["nml_allcfgs"][fgas_files_conc_flag])

        self.set_config(
            conflict="ignore",
            file_emisscen=self._scen_file_name,
            rf_initialization_method="ZEROSTARTSHIFT",
            rf_total_constantafteryr=10000,
            file_co2i_emis="",
            file_co2b_emis="",
            file_co2_conc=co2_conc_filename,
            co2_switchfromconc2emis_year=10000,
            file_ch4i_emis="",
            file_ch4b_emis="",
            file_ch4n_emis="",
            file_ch4_conc=ch4_conc_filename,
            ch4_switchfromconc2emis_year=10000,
            file_n2oi_emis="",
            file_n2ob_emis="",
            file_n2on_emis="",
            file_n2o_conc="",
            n2o_switchfromconc2emis_year=1750,
            file_noxi_emis="",
            file_noxb_emis="",
            file_noxi_ot="",
            file_noxb_ot="",
            file_noxt_rf="",
            file_soxnb_ot="",
            file_soxi_ot="",
            file_soxt_rf="",
            file_soxi_emis="",
            file_soxb_emis="",
            file_soxn_emis="",
            file_oci_emis="",
            file_ocb_emis="",
            file_oci_ot="",
            file_ocb_ot="",
            file_oci_rf="",
            file_ocb_rf="",
            file_bci_emis="",
            file_bcb_emis="",
            file_bci_ot="",
            file_bcb_ot="",
            file_bci_rf="",
            file_bcb_rf="",
            bcoc_switchfromrf2emis_year=1750,
            file_nh3i_emis="",
            file_nh3b_emis="",
            file_nmvoci_emis="",
            file_nmvocb_emis="",
            file_coi_emis="",
            file_cob_emis="",
            file_mineraldust_rf="",
            file_landuse_rf="",
            file_bcsnow_rf="",
            # rf_fgassum_scale=0,  # this appears to do nothing, hence the next two lines
            fgas_switchfromconc2emis_year=10000,
            rf_mhalosum_scale=0,
            stratoz_o3scale=0,
            rf_volcanic_scale=0,
            rf_solar_scale=0,
            mhalo_switchfromconc2emis_year=1750,
            fgas_files_conc=fgas_conc_files,
        )
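A hedged sketch of how this method is typically driven; the context-manager pattern and run() call follow pymagicc's usual interface, but treat the details as illustrative rather than the project's exact API:

from pymagicc import MAGICC6

with MAGICC6() as magicc:
    magicc.set_zero_config()   # the method defined above
    results = magicc.run()     # forcing and temperature output should now be zero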
Example #3
def write_ppt_files(tasks):
    freqgroups = cmor_utils.group(tasks, get_output_freq)
    # Fix for issue 313, make sure to always generate 6-hourly ppt:
    if list(freqgroups.keys()) == [3]:
        freqgroups[6] = []
    if -1 in freqgroups:
        freqgroups.pop(-1)
    freqs_to_remove = []
    for freq1 in freqgroups:
        if freq1 <= 0:
            continue
        for freq2 in freqgroups:
            if freq2 > freq1:
                if freq2 % freq1 == 0:
                    freqgroups[freq2] = freqgroups[freq1] + freqgroups[freq2]
                else:
                    log.error("Frequency %d is not a divisor of frequency %d: this is not supported, "
                              "removing the former" % (freq1, freq2))
                    freqs_to_remove.append(freq1)
    for freq in set(freqs_to_remove):
        freqgroups.pop(freq, None)
    num_slices_tot_sp, num_slices_tot_gp, num_blocks_tot_sp, num_blocks_tot_gp = 0, 0, 0, 0
    min_freq = max(freqgroups.keys())
    prev_freq = 0
    fx_namelist = {}
    for freq in sorted(freqgroups.keys()):
        mfp2df, mfpphy, mfp3dfs, mfp3dfp, mfp3dfh = [], [], [], [], []
        num_slices_sp, num_slices_gp, num_blocks_sp, num_blocks_gp = 0, 0, 0, 0
        alevs, plevs, hlevs = [], [], []
        for task in freqgroups[freq]:
            zaxis, levs = cmor_target.get_z_axis(task.target)
            root_codes = task.source.get_root_codes()
            if not zaxis:
                for code in root_codes:
                    if freq > 0 and code in cmor_source.ifs_source.grib_codes_fx:
                        continue
                    if code in cmor_source.ifs_source.grib_codes_3D:
                        # Exception for orog and areacella, depend only on lowest level of 129:
                        if task.target.variable in ["orog", "areacella"] and code == cmor_source.grib_code(129):
                            mfp2df.append(code)
                        else:
                            log.warning("3D grib code %s used in 2D cmor-target %s..."
                                        "assuming this is on model levels" % (str(code), task.target.variable))
                            mfp3dfs.append(code)
                    elif code in cmor_source.ifs_source.grib_codes_2D_dyn:
                        log.info("Adding grib code %s to MFP2DF %dhr ppt file for variable "
                                 "%s in table %s" % (str(code), freq, task.target.variable, task.target.table))
                        mfp2df.append(code)
                    elif code in cmor_source.ifs_source.grib_codes_2D_phy:
                        log.info("Adding grib code %s to MFPPHY %dhr ppt file for variable "
                                 "%s in table %s" % (str(code), freq, task.target.variable, task.target.table))
                        mfpphy.append(code)
                    else:
                        log.error("Unknown 2D IFS grib code %s skipped" % str(code))
            else:
                for code in root_codes:
                    if freq > 0 and code in cmor_source.ifs_source.grib_codes_fx:
                        continue
                    if code in cmor_source.ifs_source.grib_codes_3D:
                        if zaxis in cmor_target.model_axes:
                            log.info("Adding grib code %s to MFP3DFS %dhr ppt file for variable "
                                     "%s in table %s" % (str(code), freq, task.target.variable, task.target.table))
                            mfp3dfs.append(code)
                            alevs.extend(levs)
                        elif zaxis in cmor_target.pressure_axes:
                            log.info("Adding grib code %s to MFP3DFP %dhr ppt file for variable "
                                     "%s in table %s" % (str(code), freq, task.target.variable, task.target.table))
                            mfp3dfp.append(code)
                            plevs.extend(levs)
                        elif zaxis in cmor_target.height_axes:
                            log.info("Adding grib code %s to MFP3DFH %dhr ppt file for variable "
                                     "%s in table %s" % (str(code), freq, task.target.variable, task.target.table))
                            mfp3dfh.append(code)
                            hlevs.extend(levs)
                        else:
                            log.error("Axis type %s unknown, adding grib code %s"
                                      "to model level variables" % (zaxis, str(code)))
                    elif code in cmor_source.ifs_source.grib_codes_2D_dyn:
                        mfp2df.append(code)
                    elif code in cmor_source.ifs_source.grib_codes_2D_phy:
                        mfpphy.append(code)
                    # case for PEXTRA tendencies is missing
                    else:
                        log.error("Unknown 3D IFS grib code %s skipped" % str(code))
        # Always add the geopotential, recommended by ECMWF
        if cmor_source.grib_code(129) not in mfp3dfs:
            mfp2df.append(cmor_source.grib_code(129))
        # Always add the surface pressure, recommended by ECMWF
        mfpphy.append(cmor_source.grib_code(134))
        # Always add the logarithm of surface pressure, recommended by ECMWF
        mfp2df.append(cmor_source.grib_code(152))
        nfp2dfsp, nfp2dfgp = count_spectral_codes(mfp2df)
        mfp2df = sorted(list(map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(), set(mfp2df))))
        nfpphysp, nfpphygp = count_spectral_codes(mfpphy)
        mfpphy = sorted(list(map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(), set(mfpphy))))
        nfp3dfssp, nfp3dfsgp = count_spectral_codes(mfp3dfs)
        mfp3dfs = sorted(list(map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(), set(mfp3dfs))))
        nfp3dfpsp, nfp3dfpgp = count_spectral_codes(mfp3dfp)
        mfp3dfp = sorted(list(map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(), set(mfp3dfp))))
        nfp3dfhsp, nfp3dfhgp = count_spectral_codes(mfp3dfh)
        mfp3dfh = sorted(list(map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(), set(mfp3dfh))))
        plevs = sorted(list(set([float(s) for s in plevs])))[::-1]
        hlevs = sorted(list(set([float(s) for s in hlevs])))
        namelist = {"CFPFMT": "MODEL"}
        if any(mfp2df):
            namelist["NFP2DF"] = len(mfp2df)
            namelist["MFP2DF"] = mfp2df
            num_slices_sp += nfp2dfsp
            num_slices_gp += nfp2dfgp
        if any(mfpphy):
            namelist["NFPPHY"] = len(mfpphy)
            namelist["MFPPHY"] = mfpphy
            num_slices_sp += nfpphysp
            num_slices_gp += nfpphygp
        if any(mfp3dfs):
            namelist["NFP3DFS"] = len(mfp3dfs)
            namelist["MFP3DFS"] = mfp3dfs
            # To include all model levels use magic number -99. Opposite, by using the magic number -1 the variable is not saved at any model level:
            namelist["NRFP3S"] = -1
            num_blocks_sp += nfp3dfssp
            num_blocks_gp += nfp3dfsgp
        if any(mfp3dfp):
            namelist["NFP3DFP"] = len(mfp3dfp)
            namelist["MFP3DFP"] = mfp3dfp
            namelist["RFP3P"] = plevs
            num_slices_sp += (nfp3dfpsp * len(plevs))
            num_slices_gp += (nfp3dfpgp * len(plevs))
        if any(mfp3dfh):
            namelist["NFP3DFH"] = len(mfp3dfh)
            namelist["MFP3DFH"] = mfp3dfh
            namelist["RFP3H"] = hlevs
            num_slices_sp += (nfp3dfhsp * len(hlevs))
            num_slices_gp += (nfp3dfhgp * len(hlevs))
        num_slices_tot_sp = num_slices_sp if prev_freq == 0 else \
            (num_slices_sp + ((freq/prev_freq) - 1) * num_slices_tot_sp)
        num_slices_tot_gp = num_slices_gp if prev_freq == 0 else \
            (num_slices_gp + ((freq/prev_freq) - 1) * num_slices_tot_gp)
        num_blocks_tot_sp = num_blocks_sp if prev_freq == 0 else \
            (num_blocks_sp + ((freq/prev_freq) - 1) * num_blocks_tot_sp)
        num_blocks_tot_gp = num_blocks_gp if prev_freq == 0 else \
            (num_blocks_gp + ((freq/prev_freq) - 1) * num_blocks_tot_gp)
        prev_freq = freq
        nml = f90nml.Namelist({"NAMFPC": namelist})
        nml.uppercase, nml.end_comma = True, True
        if freq > 0:
            f90nml.write(nml, "pptdddddd%04d" % (100 * freq,))
        if freq == 0:
            fx_namelist = namelist
        if freq == min_freq:
            # Always add orography and land mask for lowest frequency ppt
            mfpphy.extend([129, 172, 43])
            mfpphy = sorted(list(set(mfpphy)))
            namelist["MFPPHY"] = mfpphy
            namelist["NFPPHY"] = len(mfpphy)
            nml = f90nml.Namelist({"NAMFPC": join_namelists(namelist, fx_namelist)})
            nml.uppercase, nml.end_comma = True, True
            # Write initial state ppt
            f90nml.write(nml, "ppt0000000000")
    average_hours_per_month = 730
    slices_per_month_sp = (average_hours_per_month * num_slices_tot_sp) / prev_freq
    slices_per_month_gp = (average_hours_per_month * num_slices_tot_gp) / prev_freq
    blocks_per_month_sp = (average_hours_per_month * num_blocks_tot_sp) / prev_freq
    blocks_per_month_gp = (average_hours_per_month * num_blocks_tot_gp) / prev_freq
    num_layers = 91
    log.info("")
    log.info("EC-Earth IFS output volume estimates:")
    log.info("---------------------------------------------------------------------------")
    log.info("# spectral GRIB messages p/m:  %d" % (slices_per_month_sp + num_layers * blocks_per_month_sp))
    log.info("# gridpoint GRIB messages p/m: %d" % (slices_per_month_gp + num_layers * blocks_per_month_gp))
    log.info("---------------------------------------------------------------------------")
    log.info("                           T255L91                     T511L91               ")
    log.info("---------------------------------------------------------------------------")
    vol255 = (slices_per_month_sp + num_layers * blocks_per_month_sp) * 0.133 / 1000. +\
             (slices_per_month_gp + num_layers * blocks_per_month_gp) * 0.180 / 1000.
    vol511 = (slices_per_month_sp + num_layers * blocks_per_month_sp) * 0.503 / 1000. +\
             (slices_per_month_gp + num_layers * blocks_per_month_gp) * 0.698 / 1000.
    log.info("                           %.2f GB/yr                %.2f GB/yr        " % (12*vol255, 12*vol511))

    hf = 3.0  # IFS heuristic factor
    with open('volume-estimate-ifs.txt', 'w') as volume_estimate:
        volume_estimate.write('\n\n\n')
        volume_estimate.write('Heuristic volume estimate for the raw EC-Earth3 IFS  output on the T255L91     grid: {:6} GB per year\n'.format(round((12 * vol255) / hf, 1)))
        volume_estimate.write('Heuristic volume estimate for the raw EC-Earth3 IFS  output on the T511L91     grid: {:6} GB per year\n'.format(round((12 * vol511) / hf, 1)))
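The f90nml output settings used above, isolated into a runnable sketch; the grib-code values in the group are illustrative only:

import sys

import f90nml

nml = f90nml.Namelist({"NAMFPC": {"CFPFMT": "MODEL", "NFP2DF": 2, "MFP2DF": [129, 152]}})
nml.uppercase, nml.end_comma = True, True
f90nml.write(nml, sys.stdout)  # uppercase keys, each value line ending in a comma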
Example #4
baseexp.namelist['spectral_dynamics_nml'][
    'damping_option'] = 'exponential_cutoff'  # Use the high-wavenumber filter option
baseexp.namelist['spectral_dynamics_nml']['damping_order'] = 4
baseexp.namelist['spectral_dynamics_nml']['damping_coeff'] = 1.3889e-04
baseexp.namelist['spectral_dynamics_nml']['cutoff_wn'] = 100

# Set initial conditions
baseexp.namelist['spectral_dynamics_nml'][
    'initial_sphum'] = 0.0  # No initial specific humidity
baseexp.namelist['spectral_init_cond_nml'][
    'initial_temperature'] = 200.  # Lower than normal initial temperature

# Set parameters for near-surface Rayleigh drag
baseexp.namelist['rayleigh_bottom_drag_nml'] = f90nml.Namelist({
    'kf_days': 10.0,
    'do_drag_at_surface': True,
    'variable_drag': False
})

# Set parameters for dry convection scheme
baseexp.namelist['dry_convection_nml'] = f90nml.Namelist({
    'tau': 21600.,
    'gamma': 1.0,  # K/km
})

# Let's do a run!
baseexp.runmonth(1, use_restart=False, num_cores=32, light=False)
for i in range(2, 121):
    baseexp.runmonth(i, num_cores=32, light=False)
Example #5
 def test_check_repeat_flag(self):
     nml = f90nml.Namelist()
     self.assertFalse(nml.repeat_counter)
Example #6
 def test_set_repeat_flag_incorrect(self):
     nml = f90nml.Namelist()
     with self.assertRaises(TypeError):
         nml.repeat_counter = 'Hello'
Example #7
def parse():
    parser = argparse.ArgumentParser()

    parser.add_argument('--version', action='version',
                        version='f90nml {0}'.format(f90nml.__version__))

    parser.add_argument('--group', '-g', action='store')
    parser.add_argument('--variable', '-v', action='append')
    parser.add_argument('--patch', '-p', action='store_true')
    parser.add_argument('--format', '-f', action='store')
    parser.add_argument('--output', '-o', action='store')

    parser.add_argument('input', nargs='?')
    parser.add_argument('output', nargs='?')

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit()

    args = parser.parse_args()

    input_fname = args.input
    output_fname = args.output

    # Get input format
    # TODO: Combine with output format
    if input_fname:
        _, input_ext = os.path.splitext(input_fname)
        if input_ext == '.json':
            input_fmt = 'json'
        elif input_ext == '.yaml':
            input_fmt = 'yaml'
        else:
            input_fmt = 'nml'
    else:
        input_fmt = 'nml'

    # Output format flag validation
    valid_formats = ('json', 'yaml', 'nml')
    if args.format and args.format not in valid_formats:
        print('f90nml: error: format must be one of the following: {0}'
              ''.format(valid_formats))
        sys.exit(-1)

    # Do not patch non-namelist output
    if args.format and args.format != 'nml' and args.patch:
        print('f90nml: error: Only namelist output can be patched.')
        sys.exit(-1)

    # Get output format
    # TODO: Combine with input format
    if not args.format:
        if output_fname:
            _, output_ext = os.path.splitext(output_fname)
            if output_ext == '.json':
                output_fmt = 'json'
            elif output_ext in ('.yaml', '.yml'):
                output_fmt = 'yaml'
            else:
                output_fmt = 'nml'
        else:
            output_fmt = 'nml'
    else:
        output_fmt = args.format

    if (input_fmt == 'yaml' or output_fmt == 'yaml') and not has_yaml:
        print('f90nml: error: YAML module could not be found.')
        sys.exit(-1)

    # Read the input file
    if input_fname:
        if input_fmt in ('json', 'yaml'):
            if input_fmt == 'json':
                with open(input_fname) as input_file:
                    input_data = json.load(input_file)
            elif input_fmt == 'yaml':
                with open(input_fname) as input_file:
                    input_data = yaml.safe_load(input_file)
        else:
            input_data = f90nml.read(input_fname)
    else:
        input_data = {}

    input_data = f90nml.Namelist(input_data)

    # Construct the update namelist
    update_nml = {}
    if args.variable:
        if not args.group:
            # Use the first available group
            grp = list(input_data.keys())[0]
            print('f90nml: warning: Assuming variables are in group \'{0}\'.'
                  ''.format(grp))
        else:
            grp = args.group

        update_nml_str = '&{0} {1} /\n'.format(grp, ', '.join(args.variable))
        update_io = StringIO(update_nml_str)
        update_nml = f90nml.read(update_io)
        update_io.close()

    # Target output
    output_file = open(output_fname, 'w') if output_fname else sys.stdout

    if args.patch:
        # We have to read the file twice for a patch.  The main reason is
        # to identify the default group, in case this is not provided.
        # It could be avoided if a group is provided, but logically that could
        # be a mess that I do not want to sort out right now.
        f90nml.patch(input_fname, update_nml, output_file)

    else:
        # Update the input namelist directly
        if update_nml:
            try:
                input_data[grp].update(update_nml[grp])
            except KeyError:
                input_data[grp] = update_nml[grp]

        # Write to output
        if output_fmt in ('json', 'yaml'):
            if output_fmt == 'json':
                input_data = input_data.todict(decomplex=True)
                json.dump(input_data, output_file,
                          indent=4, separators=(',', ': '))
                output_file.write('\n')

            elif output_fmt == 'yaml':
                input_data = input_data.todict(decomplex=True)
                yaml.dump(input_data, output_file,
                          default_flow_style=False)
        else:
            # Default to namelist output
            f90nml.write(input_data, output_file)

    # Cleanup
    if output_file != sys.stdout:
        output_file.close()
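For comparison, the same patch operation can be done in one call from the f90nml API; the file names and the variable being set are hypothetical:

import f90nml

# Rewrites input.nml with dt replaced, carrying the rest of the file through
f90nml.patch('input.nml', {'config_nml': {'dt': 900}}, 'output.nml')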
Example #8
import f90nml

# Adapted from the original core.nml and phys.nml files included in the 2006 codebase.

# Where a value is the same as the default in the code base, it is commented out
# and therefore not included in the namelist.
# Where a value differs, the code default is shown in an inline comment.
basic = f90nml.Namelist({})

basic['spectral_dynamics_nml'] = {
    #'damping_option'          : 'resolution_dependent',
    'damping_order': 4,  # default: 2
    #'do_mass_correction':      True
    #'do_energy_correction':    True
    #'do_water_correction':     True
    'water_correction_limit': 200.e2,  # default: 0
    #'use_virtual_temperature': False
    #'vert_advect_uv'          : 'second_centered',
    #'vert_advect_t'           : 'second_centered',
    #'longitude_origin'        : 0.0,
    #'robert_coeff'            : .03,                   # default: 0.04
    #'alpha_implicit'          : .5,
    'reference_sea_level_press': 1.0e5,  # default: 101325
    #'lon_max'                 : 128,
    #'lat_max'                 : 64,
    'num_levels': 25,  # default: 18
    #'num_fourier'             : 42,
    #'num_spherical'           : 43,
    #'fourier_inc'             : 1,
    #'triang_trunc'            :True
    'valid_range_t': [100., 800.],  # default: (100, 500)
}
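Once populated, the namelist writes out like any other f90nml.Namelist; the filename below is hypothetical:

basic['spectral_dynamics_nml']['num_levels'] = 40  # values can still be overridden first
basic.write('core.nml', force=True)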
Example #9
def setup_usgs_daily(config, config_file=None):

    print('Preparing USGS daily observations.')

    # Setup the namelist and establish the "outputdir". The create_usgs_daily_obs_seq
    # actually creates the obs sequences.

    usgs_daily_config = config['observation_preparation']['USGS_daily']
    input_dir = usgs_daily_config['input_dir']
    output_dir = usgs_daily_config['output_dir']
    # Output directory: create it (error if it already exists)
    output_dir.mkdir(exist_ok=False, parents=True)

    # converter: identity or regular obs converter?
    # Check that the desired obs converter is in the dart build
    exp_dir = config['experiment']['experiment_dir']
    dart_build_dir = config['dart']['build_dir']
    dart_compile = pickle.load(
        open(exp_dir / dart_build_dir / 'DartCompile.pkl', 'rb'))

    if usgs_daily_config['identity_obs']:
        ocp = dart_compile.models__wrf_hydro__work.exes[
            'create_identity_streamflow_obs']
    else:
        ocp = dart_compile.observations__obs_converters__USGS__work.exes[
            'convert_streamflow']

    obs_conv_prog = ocp
    _ = shutil.copy(obs_conv_prog, output_dir / obs_conv_prog.name)

    # input.nml: patch.
    converter_nml = str(obs_conv_prog.name) + '_nml'
    obs_conv_patches = usgs_daily_config['input_nml_patches'][converter_nml]
    input_nml = f90nml.read(obs_conv_prog.parent / 'input.nml')
    internal_patches = ['input_files', 'location_file']
    special_patches = ['gages_file_list']
    for kk in usgs_daily_config['input_nml_patches'].keys():
        if kk in internal_patches + special_patches:
            if kk in internal_patches:
                warnings.warn(
                    "USGS observation converter namelist patch is applied internally: "
                    + kk)
        input_nml[kk].update(usgs_daily_config['input_nml_patches'][kk])

    # input.nml gage_file_list: Allow a file or a list: link or construct file, set in input.nml.
    wanted_gages = usgs_daily_config['wanted_gages']
    if type(wanted_gages) in [str, pathlib.PosixPath]:
        wanted_gages = pathlib.PosixPath(wanted_gages)
        (output_dir / wanted_gages.name).symlink_to(wanted_gages)
        input_nml[converter_nml]['gages_list_file'] = wanted_gages.name
    elif type(wanted_gages) is list:
        default_filename = 'wanted_gages_list.txt'
        input_nml[converter_nml]['gages_list_file'] = default_filename
        with open(output_dir / default_filename, 'w') as opened_file:
            for gg in wanted_gages:
                _ = opened_file.write(str(gg) + '\n')
    else:
        raise ValueError("wanted_gages must be either string or list type.")

    # input.nml location_file/Routelink: link file, edit input.nml
    run_dir = config['experiment']['run_dir']
    m0 = pickle.load(open(run_dir / "member_000/WrfHydroSim.pkl", 'rb'))
    route_link_f = run_dir / 'member_000' / m0.base_hydro_namelist[
        'hydro_nlist']['route_link_f']
    (output_dir / route_link_f.name).symlink_to(route_link_f)
    input_nml[converter_nml]['location_file'] = route_link_f.name

    # input.nml input_files: create a list of files in the start and end range.
    in_start_time = datetime.datetime.strptime(
        str(usgs_daily_config['start_date']), '%Y-%m-%d')
    in_end_time = datetime.datetime.strptime(
        str(usgs_daily_config['end_date']), '%Y-%m-%d')
    all_input_files = sorted(input_dir.glob("*.usgsTimeSlice.ncdf"))
    input_files_requested = []
    for ff in all_input_files:
        file_time = datetime.datetime.strptime(
            ff.name.split('.')[0], '%Y-%m-%d_%H:%M:%S')
        # For end_time, add in a day and use a strictly less than...
        if file_time >= in_start_time and file_time < (
                in_end_time + datetime.timedelta(days=1)):
            input_files_requested.append(ff)

    default_filename = 'list_of_obs_files.txt'
    input_nml[converter_nml]['input_files'] = default_filename
    with open(output_dir / default_filename, 'w') as opened_file:
        for ff in input_files_requested:
            _ = opened_file.write(str(ff) + '\n')

    # For identity obs, the model_mod and the model_nml namelist are used....
    # This requires hydro_rst file, hydro.namelist, attendant file....
    # Parameter and LSM files should not be needed, so just set the hydro restart file.
    if usgs_daily_config['identity_obs']:

        hydro_rst_file = run_dir / 'member_000' / m0.base_hydro_namelist[
            'hydro_nlist']['restart_file']
        (output_dir / hydro_rst_file.name).symlink_to(hydro_rst_file)
        input_nml['model_nml']['domain_order'] = 'hydro'
        input_nml['model_nml']['domain_shapefiles'] = str(hydro_rst_file.name)

        f90nml.Namelist(m0.base_hydro_namelist).write(output_dir /
                                                      'hydro.namelist',
                                                      force=True)
        top_level_dir = get_top_level_dir_from_config(config, m0)
        (output_dir / top_level_dir).symlink_to(
            config['wrf_hydro']['domain_src'] / top_level_dir)

    # Now we are done editing it, write the input.nml back out.
    input_nml.write(output_dir / 'input.nml')

    # Symlink the config file into the output_dir so the default yaml file name
    # can be used by create_usgs_daily_obs_seq.
    if config_file is None:
        config_file = sorted(exp_dir.glob('original.*.yaml'))[0]
    (output_dir / 'config_file.yaml').symlink_to(config_file)

    # Stage the file that does the batch processing.
    this_file = pathlib.Path(__file__)
    batcher_base = 'create_usgs_daily_obs_seq.py'
    (output_dir / batcher_base).symlink_to(this_file.parent / batcher_base)

    # Setup the scheduled script.
    orig_submit_script = this_file.parent / 'submission_scripts/submit_usgs_daily_obs_converter.sh'
    this_submit_script = output_dir / 'submit_usgs_daily_obs_converter.sh'
    shutil.copy(orig_submit_script, this_submit_script)

    # Set the PBS directives (cheyenne)

    # PBS options from config
    # Short-hand
    usgs_sched = config['observation_preparation']['USGS_daily']['scheduler']

    # The easy ones.
    replace_in_file(this_submit_script, 'JOB_NAME_TEMPLATE',
                    usgs_sched['job_name'])
    replace_in_file(this_submit_script, 'ACCOUNT_TEMPLATE',
                    usgs_sched['account'])
    replace_in_file(this_submit_script, 'EMAIL_WHO_TEMPLATE',
                    usgs_sched['email_who'])
    replace_in_file(this_submit_script, 'EMAIL_WHEN_TEMPLATE',
                    usgs_sched['email_when'])
    replace_in_file(this_submit_script, 'QUEUE_TEMPLATE', usgs_sched['queue'])

    # Wall time
    usgs_walltime = usgs_sched['walltime']
    if len(usgs_walltime.split(':')) == 2:
        usgs_walltime = usgs_walltime + ':00'
    usgs_walltime = 'walltime=' + usgs_walltime
    replace_in_file(this_submit_script, 'WALLTIME_TEMPLATE', usgs_walltime)

    # Select statement
    # Right now, only single node processing
    select_stmt = 'select=1:ncpus={ncpus}:mpiprocs={mpiprocs}'.format(
        **{
            'ncpus': usgs_sched['ncpus'],
            'mpiprocs': usgs_sched['mpiprocs']
        })
    replace_in_file(this_submit_script, 'PBS_SELECT_TEMPLATE', select_stmt)

    wait_file = output_dir / '.this_submit_script_not_complete'
    replace_in_file(this_submit_script, 'WAIT_FILE_TEMPLATE', str(wait_file))

    proc = subprocess.Popen(shlex.split('touch ' + wait_file.name),
                            cwd=output_dir)
    proc.wait()

    proc = subprocess.Popen(shlex.split('qsub ' + this_submit_script.name),
                            cwd=output_dir)
    proc.wait()

    print('Job submitted. \nWait file: ' + str(wait_file) + ' ...')
    while wait_file.exists():
        msg = 'Last check for wait file {twirl}: ' + \
              datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        for twirl in ['|', '/', '-', '\\', '|', '/', '-', '\\']:
            print(msg.format(**{'twirl': twirl}), end='\r')
            time.sleep(10 / 8)

    # Link the obs_seq files to the "all_obs_dir" for the experiment.
    all_obs_dir = pathlib.PosixPath(
        config['observation_preparation']['all_obs_dir'])
    all_obs_seq = output_dir.glob('obs_seq.*')
    for oo in all_obs_seq:
        (all_obs_dir / oo.name).symlink_to(oo)

    return 0
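The core f90nml pattern this function leans on, reduced to a standalone sketch (the paths and group name are hypothetical):

import f90nml

input_nml = f90nml.read('work/input.nml')       # parse the existing namelist file
input_nml['model_nml'].update({'debug': True})  # patch one group in place
input_nml.write('work/input.nml', force=True)   # write it back out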
Example #10
def parse():
    parser = argparse.ArgumentParser()

    parser.add_argument('--version',
                        action='version',
                        version='f90nml {0}'.format(f90nml.__version__))

    parser.add_argument('--group', '-g', action='store')
    parser.add_argument('--set', '-s', action='append')
    parser.add_argument('--patch', '-p', action='store_true')

    parser.add_argument('input')
    parser.add_argument('output', nargs='?')

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit()

    args = parser.parse_args()

    input_fname = args.input
    output_fname = args.output

    # Input config

    if input_fname:
        _, input_ext = os.path.splitext(input_fname)
        if input_ext == '.json':
            with open(input_fname) as input_file:
                input_data = json.load(input_file)

        elif has_yaml and input_ext == '.yaml':
            with open(input_fname) as input_file:
                input_data = yaml.safe_load(input_file)

        else:
            # Assume unrecognised extensions are namelists
            input_data = f90nml.read(input_fname)
    else:
        input_data = {}

    input_data = f90nml.Namelist(input_data)

    # Replace any values

    if args.set:
        if not args.group:
            # Use the first available group
            grp = list(input_data.keys())[0]
            print('f90nml: warning: Assuming variables are in group \'{0}\'.'
                  ''.format(grp))
        else:
            grp = args.group

        update_nml = '&{0} {1} /\n'.format(grp, ', '.join(args.set))
        with io.StringIO(update_nml) as update_io:
            update_data = f90nml.read(update_io)

        input_data[grp].update(update_data[grp])

    # Target output

    if output_fname:
        _, output_ext = os.path.splitext(output_fname)

        # TODO: Better control of output format
        if output_ext == '.json':
            input_data = input_data.todict(decomplex=True)
            with open(output_fname, 'w') as output_file:
                json.dump(input_data,
                          output_file,
                          indent=4,
                          separators=(',', ': '))
                output_file.write('\n')

        elif output_ext == '.yaml':
            if has_yaml:
                input_data = input_data.todict(decomplex=True)
                with open(output_fname, 'w') as output_file:
                    yaml.dump(input_data,
                              output_file,
                              default_flow_style=False)
            else:
                print('f90nml: error: YAML module could not be found.')
                sys.exit(-1)

        else:
            # Default to namelist output
            f90nml.write(input_data, output_fname)
    else:
        # TODO: Combine with extension output
        f90nml.write(input_data, sys.stdout)
Example #11
def stratcal_merge_output_2(indata_exch, outdata):
    """Merges information from indata_exch and outdata
    keeping all information from the indata,
    replacing the stratcal_sweep_list, and asdding new IDs as needed

    NB Variant, that gets its input from original (NOT exchange) format stratcal.out
    Written when stratcal.out.def was NOT correct.

    NBNB HACKY"""

    result = copy.deepcopy(indata_exch)

    sweeps = outdata['sweep_list']
    if not isinstance(sweeps, list):
        sweeps = [sweeps]
    goniostat_setting_list = result['goniostat_setting_list'] = []
    centred_setting_list = result['centred_goniostat_setting_list'] = []
    stratcal_sweep_list = result['stratcal_sweep_list'] = []
    goniostat_setting_map = {}

    # Transfer beam, beamstop and detector setting IDs from input
    # NBNB This assumes that there was only one of each
    # and it was transferred as by stratcal_exch2org
    other_settings = {}
    for tag in ('beam_setting', 'beamstop_setting', 'detector_setting'):
        ll = indata_exch[tag + '_list']
        if isinstance(ll, list):
            other_settings[tag + '_id'] = ll[0]['id']
        else:
            other_settings[tag + '_id'] = ll['id']

    ii = 0
    for sweep in sweeps:

        sw = f90nml.Namelist()
        sw.update(other_settings)

        # Make stratcal_sweep
        ii += 1
        stratcal_sweep_list.append(sw)
        sw['start_deg'] = sweep['omega_deg']
        sw['length_deg'] = sweep['n_frames'] * sweep['step_deg']
        sw['type'] = 'MAIN:ORIENT'
        sw['group_no'] = ii

        # Check if we have the exact same setting already
        settings = (sweep['omega_deg'], sweep['kappa_deg'],
                    sweep['phi_deg']) + tuple(sweep['trans_xyz'])
        use_id = goniostat_setting_map.get(settings)
        has_centring = any(x for x in settings[3:])
        if not use_id:

            # Make goniostat_setting
            goniostat_setting = f90nml.Namelist()
            goniostat_setting['id'] = str(uuid.uuid1())
            goniostat_setting['omega_deg'] = settings[0]
            goniostat_setting['kappa_deg'] = settings[1]
            goniostat_setting['phi_deg'] = settings[2]
            goniostat_setting['spindle_deg'] = sweep['spindle_deg']
            goniostat_setting['scan_axis_no'] = sweep['axis_no']
            goniostat_setting['aligned_crystal_axis_order'] = 0
            goniostat_setting_list.append(goniostat_setting)

            if has_centring:
                # Make centring_goniostat_setting
                obj = f90nml.Namelist()
                use_id = obj['id'] = str(uuid.uuid1())
                obj['goniostat_setting_id'] = goniostat_setting['id']
                obj['trans_1'], obj['trans_2'], obj['trans_3'] = settings[3:]
                centred_setting_list.append(obj)
            else:
                use_id = goniostat_setting['id']

        if has_centring:
            sw['centred_goniostat_setting_id'] = use_id
            sw['goniostat_setting_id'] = ' '
        else:
            sw['goniostat_setting_id'] = use_id
            sw['centred_goniostat_setting_id'] = ' '

    loop_counts = result['loop_count_list']
    loop_counts['n_sweeps'] = len(sweeps)
    loop_counts['n_goniostat_settings'] = len(goniostat_setting_list)
    loop_counts['n_centred_goniostat_settings'] = len(centred_setting_list)
    #
    return result
Example #12
    def setup(self):

        # TODO: Find a better place to generate this list
        files = [
            f for f in os.listdir(self.control_path) if f.startswith('data')
        ]
        files.append('eedata')

        # Rudimentary check that matching files are namelists. The only check
        # we can do is whether the parsed namelist is empty. This may exclude
        # false positives, but those are devoid of useful information anyway
        for fname in files:
            try:
                data_nml = self.read_namelist(fname)
            except Exception:
                data_nml = []

            if len(data_nml) > 0:
                self.config_files.append(fname)
            else:
                print("Excluding {0} from configuration files: assumed "
                      "to be not a namelist file (or empty)".format(fname))

        # Generic model setup
        super(Mitgcm, self).setup()

        if self.prior_restart_path and not self.expt.repeat_run:
            # Determine total number of timesteps since initialisation
            core_restarts = [
                f for f in os.listdir(self.prior_restart_path)
                if f.startswith('pickup.')
            ]
            try:
                # NOTE: Use the most recent, in case of multiple restarts
                n_iter0 = max([int(f.split('.')[1]) for f in core_restarts])
            except ValueError:
                sys.exit("payu: error: no restart files found.")
        else:
            n_iter0 = 0

        # Update configuration file 'data'
        data_path = os.path.join(self.work_path, 'data')
        data_nml = self.read_namelist(data_path)

        # Timesteps are either global (deltat) or divided into momentum
        # (deltatmom) and tracer (deltat).  If deltat is missing, then we just
        # try deltatmom.  But I am not sure how to best handle this case.

        restart_calendar_path = os.path.join(self.work_init_path,
                                             self.restart_calendar_file)
        # TODO: Sort this out with an MITgcm user
        try:
            dt = float(data_nml['parm03']['deltat'])
        except KeyError:
            dt = float(data_nml['parm03']['deltatmom'])

        # Basetime defaults to zero
        basetime = 0.

        # Runtime is set either by timesteps (ntimesteps) or physical
        # time (startTime and endTime).
        t_start = data_nml['parm03'].get('starttime', None)
        t_end = data_nml['parm03'].get('endtime', None)

        n_timesteps = data_nml['parm03'].get('ntimesteps', None)

        # Support specifying just start and end times, and infer n_timesteps
        # from them, so that the run length stays the same even if dt changes
        if t_start is not None:
            if t_end is not None:
                # Standardise on starttime, ntimesteps and niter0
                del data_nml['parm03']['endtime']

                if n_timesteps is None:
                    print("Calculated n_timesteps from starttime and endtime")
                    n_timesteps = round((t_end - t_start) / dt)
            else:
                # Assume n_timesteps and dt set correctly
                pass

        if t_start is None or (self.prior_restart_path
                               and not self.expt.repeat_run):
            # Look for a restart file from a previous run
            if os.path.exists(restart_calendar_path):
                with open(restart_calendar_path, 'r') as restart_file:
                    restart_info = yaml.safe_load(restart_file)
                t_start = float(restart_info['endtime'])
            else:
                # Use same logic as MITgcm and assume
                # constant dt for the whole experiment
                t_start = n_iter0 * dt

        # Check if deltat has changed
        if n_iter0 != round(t_start / dt):

            # Specify a pickup suffix using previous niter0
            data_nml['parm03']['pickupsuff'] = '{:010d}'.format(n_iter0)

            n_iter0_previous = n_iter0

            n_iter0 = round(t_start / dt)

            if n_iter0 * dt != t_start:
                # Modify basetime.
                # TODO: Change logic entirely to using
                # this conceptually much simpler approach
                basetime = t_start
                n_iter0 = 0

            if n_iter0 + n_timesteps == n_iter0_previous:
                mesg = ('payu : error: Timestep changed to {dt}. '
                        'Timestep at end identical to previous pickups: '
                        '{niter}\nThis would overwrite previous '
                        'pickups'.format(dt=dt, niter=(n_iter0 + n_timesteps)))
                sys.exit(mesg)

        t_end = t_start + dt * n_timesteps
        pchkpt_freq = t_end - t_start

        print('  base time:  {}'.format(basetime))
        print('  start time: {}'.format(t_start))
        print('  end time:   {}'.format(t_end))
        print('  niter0 :    {}'.format(n_iter0))
        print('  ntimesteps: {}'.format(n_timesteps))
        print('  dt:         {}'.format(dt))
        print('  end - start:     {}'.format(pchkpt_freq))
        print('  dt * ntimesteps: {}'.format(dt * n_timesteps))
        if pchkpt_freq != dt * n_timesteps:
            print('payu : error : time inconsistencies, '
                  'pchkptfreq ({}) != experiment length ({})'
                  ''.format(pchkpt_freq, dt * n_timesteps))
            sys.exit(1)

        data_nml['parm03']['startTime'] = t_start
        data_nml['parm03']['niter0'] = n_iter0
        data_nml['parm03']['endTime'] = t_end
        data_nml['parm03']['baseTime'] = basetime

        # NOTE: Consider permitting pchkpt_freq < dt * n_timesteps
        if t_end % pchkpt_freq != 0:
            # Terrible hack: when we change dt, the pickup frequency no longer
            # makes sense, so we have to set it to the total runtime
            data_nml['parm03']['pchkptfreq'] = t_end
        else:
            data_nml['parm03']['pchkptfreq'] = pchkpt_freq

        data_nml['parm03']['chkptfreq'] = 0

        data_nml.write(data_path, force=True)

        # Patch or create data.mnc
        mnc_header = os.path.join(self.work_path, 'mnc_')

        data_mnc_path = os.path.join(self.work_path, 'data.mnc')
        try:
            data_mnc_nml = self.read_namelist(data_mnc_path)
            data_mnc_nml['mnc_01']['mnc_outdir_str'] = mnc_header
            data_mnc_nml.write(data_mnc_path, force=True)

        except IOError as exc:
            if exc.errno == errno.ENOENT:

                mnc_01_grp = {
                    'mnc_use_outdir': True,
                    'mnc_use_name_ni0': True,
                    'mnc_outdir_str': mnc_header,
                    'mnc_outdir_date': True,
                    'monitor_mnc': True
                }
                data_mnc_nml = f90nml.Namelist(mnc_01=mnc_01_grp)
                data_mnc_nml.write(data_mnc_path)
            else:
                raise
Example #13
import logging
import f90nml

logger = logging.getLogger(__name__)

def write_namelist(namelist: dict, path: str) -> None:
    logger.info(f'writing namelist to {path}')
    nml = f90nml.Namelist(namelist)
    nml.indent = 0
    nml.write(path, force=True)
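A usage sketch for the helper above; the group and variables are hypothetical. Setting indent = 0 left-aligns the variable assignments in the written file:

write_namelist({'model_nml': {'nx': 96, 'ny': 96}}, 'model.nml')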
Example #14
import f90nml

def write_namelist(namelist: dict, path: str) -> None:
    nml = f90nml.Namelist(namelist)
    nml.indent = 0
    nml.write(path, force=True)
Example #15
def write_ppt_files(tasks):
    freqgroups = cmor_utils.group(tasks, get_output_freq)
    for freq1 in freqgroups:
        for freq2 in freqgroups:
            if freq2 > freq1 and freq2 % freq1 == 0:
                freqgroups[freq2] = freqgroups[freq1] + freqgroups[freq2]
    for freq in freqgroups:
        mfp2df, mfpphy, mfp3dfs, mfp3dfp, mfp3dfv = [], [], [], [], []
        alevs, plevs, hlevs = [], [], []
        for task in freqgroups[freq]:
            zaxis, levs = cmor_target.get_z_axis(task.target)
            root_codes = task.source.get_root_codes()
            if not zaxis:
                for code in root_codes:
                    if code in cmor_source.ifs_source.grib_codes_3D:
                        log.warning(
                            "3D grib code %s used in 2D cmor-target %s..."
                            "assuming this is on model levels" %
                            (str(code), task.target.variable))
                        mfp3dfs.append(code)
                    elif code in cmor_source.ifs_source.grib_codes_2D_dyn:
                        log.info(
                            "Adding grib code %s to MFP2DF %dhr ppt file for variable "
                            "%s in table %s" %
                            (str(code), freq, task.target.variable,
                             task.target.table))
                        mfp2df.append(code)
                    elif code in cmor_source.ifs_source.grib_codes_2D_phy:
                        log.info(
                            "Adding grib code %s to MFPPHY %dhr ppt file for variable "
                            "%s in table %s" %
                            (str(code), freq, task.target.variable,
                             task.target.table))
                        mfpphy.append(code)
                    else:
                        log.error("Unknown IFS grib code %s skipped" %
                                  str(code))
            else:
                for code in root_codes:
                    if code in cmor_source.ifs_source.grib_codes_3D:
                        if zaxis in cmor_target.model_axes:
                            log.info(
                                "Adding grib code %s to MFP3DFS %dhr ppt file for variable "
                                "%s in table %s" %
                                (str(code), freq, task.target.variable,
                                 task.target.table))
                            mfp3dfs.append(code)
                            alevs.extend(levs)
                        elif zaxis in cmor_target.pressure_axes:
                            log.info(
                                "Adding grib code %s to MFP3DFP %dhr ppt file for variable "
                                "%s in table %s" %
                                (str(code), freq, task.target.variable,
                                 task.target.table))
                            mfp3dfp.append(code)
                            plevs.extend(levs)
                        elif zaxis in cmor_target.height_axes:
                            log.info(
                                "Adding grib code %s to MFP3DFV %dhr ppt file for variable "
                                "%s in table %s" %
                                (str(code), freq, task.target.variable,
                                 task.target.table))
                            mfp3dfv.append(code)
                            hlevs.extend(levs)
                        else:
                            log.error(
                                "Axis type %s unknown, adding grib code %s "
                                "to model level variables" %
                                (zaxis, str(code)))
                    elif code in cmor_source.ifs_source.grib_codes_2D_dyn:
                        mfp2df.append(code)
                    elif code in cmor_source.ifs_source.grib_codes_2D_phy:
                        mfpphy.append(code)
                    else:
                        log.error("Unknown IFS grib code %s skipped" %
                                  str(code))
        if any(mfp3dfs):
            mfp2df.append(
                cmor_source.grib_code(134))  # Add surface pressure for 3D vars
        mfp2df = sorted(
            list(
                map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(),
                    set(mfp2df))))
        mfpphy = sorted(
            list(
                map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(),
                    set(mfpphy))))
        mfp3dfs = sorted(
            list(
                map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(),
                    set(mfp3dfs))))
        mfp3dfp = sorted(
            list(
                map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(),
                    set(mfp3dfp))))
        mfp3dfv = sorted(
            list(
                map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(),
                    set(mfp3dfv))))
        plevs = sorted(list(set([float(s) for s in plevs])))[::-1]
        hlevs = sorted(list(set([float(s) for s in hlevs])))
        namelist = {"CFPFMT": "MODEL"}
        if any(mfp2df):
            namelist["NFP2DF"] = len(mfp2df)
            namelist["MFP2DF"] = mfp2df
        if any(mfpphy):
            namelist["NFPPHY"] = len(mfpphy)
            namelist["MFPPHY"] = mfpphy
        if any(mfp3dfs):
            namelist["NFP3DFS"] = len(mfp3dfs)
            namelist["MFP3DFS"] = mfp3dfs
            namelist["NRFP3S"] = -1
        if any(mfp3dfp):
            namelist["NFP3DFP"] = len(mfp3dfp)
            namelist["MFP3DFP"] = mfp3dfp
            namelist["RFP3P"] = plevs
        if any(mfp3dfv):
            namelist["NFP3DFV"] = len(mfp3dfv)
            namelist["MFP3DFV"] = mfp3dfv
            namelist["RFP3V"] = hlevs
        nml = f90nml.Namelist({"NAMFPC": namelist})
        nml.uppercase, nml.end_comma = True, True
        f90nml.write(nml, "pptdddddd%04d" % (100 * freq, ))
Example #16
    def gensetup(self,
                 namepatch=None,
                 params=None,
                 templatepath=None,
                 setuppath=None,
                 cleansetup=False):
        """ Generate the MITgcm setup for the process model using a template
        setup.

        Args:
            namepatch (dict): A namelist patch dictionary with the structure
                namepatch[namelistfilename][namelist][variable] = value.
                If specified, it will be merged with the existing default.

            params (dict): A dictionary of parameters with the form 
                {paramname: value}. Each paramname is looked up in the 
                template namelists and, if found, changed to value.
                Each param must have a pre-existing template value for any 
                change to take place.

            setuppath (str): Path to the working directory in which to put
                the setup.

            cleansetup (bool): Boolean indicating whether to erase an
                existing setup.
        """

        if namepatch is not None:
            self.namelists = {**namepatch, **self.namelists}

        self.setup = Setup(setuppath, init=True, clean=cleansetup)
        self.saveinput()
        self.updatenamelists()
        self.updatesize()

        # Copy template
        dontcopylist = ['.swp', 'mitgcmuv']
        for dirname in ['code', 'input']:
            for filename in os.listdir(getattr(self.template, dirname)):
                if not any(bad in filename for bad in dontcopylist):
                    shutil.copy(
                        os.path.join(getattr(self.template, dirname), filename),
                        os.path.join(getattr(self.setup, dirname), filename))

        # Change size vars
        gcmutils.changesizevars(self.size, sizepath=self.setup.code)

        # Merge template namelists with model namelist
        fullnamelists = {}
        for nmlfile in self.namelists.keys():

            # Load namelist files from template directory, not newly-created
            # setup
            try:
                fullnamelists[nmlfile] = f90nml.read(
                    os.path.join(self.template.input, nmlfile))
            except FileNotFoundError:
                fullnamelists[nmlfile] = f90nml.Namelist(
                    self.namelists[nmlfile])

            # Paste-in new vars
            for nml in self.namelists[nmlfile].keys():
                try:
                    for var in self.namelists[nmlfile][nml].keys():
                        fullnamelists[nmlfile][nml][var] = (
                            self.namelists[nmlfile][nml][var])
                except KeyError:
                    fullnamelists[nmlfile][nml] = f90nml.Namelist(
                        self.namelists[nmlfile][nml])

        # Homogenize references to 'r' or 'z' in data namelist
        fullnamelists['data'] = gcmutils.correctdatanamelist(
            fullnamelists['data'], correctto='z')

        fullnamelists = self.trimnamelists(fullnamelists)

        # Save namelists
        for filename in fullnamelists.keys():
            savename = os.path.join(self.setup.input, filename)
            fullnamelists[filename].write(savename, force=True)

        # Change parameters
        if params is not None:
            for param, value in params.items():
                self.setup.setparam(param, value, checksize=False)
Example #17
import f90nml

def write_namelist(params_dict, path):
    nml = f90nml.Namelist(params_dict)
    nml.end_comma = True
    nml.uppercase = True
    nml.indent = ''
    nml.write(path, force=True)
Example #18
 def test_namelist_default_index(self):
     d = {'x_nml': {'x': [1, 2, 3]}}
     test_nml = f90nml.Namelist(d, default_start_index=1)
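Writing the namelist out makes the effect visible; a sketch, with the exact rendering depending on the f90nml version:

import sys

import f90nml

d = {'x_nml': {'x': [1, 2, 3]}}
test_nml = f90nml.Namelist(d, default_start_index=1)
test_nml.write(sys.stdout)  # array indices are interpreted as starting at 1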
Example #19
File: run.py  Project: dxhisboy/swAmber
print("Updating namelist...")
nl = f90nml.read(indir + SEP + 'md.in')

for var in args.nlvar:
    sp = var.split('=', 1)
    if len(sp) != 2:
        print("Not recognized nlvar: " + var)
        sys.exit(1)
    path = list(map(str.strip, sp[0].split(".")))
    if len(path) < 1:
        print("Not recognized nlvar: " + var)
        sys.exit(1)
    node = nl
    for part in path[:-1]:
        if part not in node:
            node[part] = f90nml.Namelist()
        node = node[part]
    node[path[-1]] = eval(sp[1])
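# Example (hypothetical flag value): --nlvar "cntrl.nstlim=1000" yields
# nl['cntrl']['nstlim'] = 1000. Note the right-hand side is passed through
# eval(), so it must be a valid Python literal.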
os.chdir(rundir)
nl.write("md.in")

pme_args = [
    "-O", "-i", "md.in", "-o", "mdout", "-inf", "mdinfo", "-r", "md.rst", "-x",
    "md.nc", "-l", "logfile", "-e", "mden"
]
if args.swlu:
    pme_args.append("-swlu")
with open(indir + SEP + "dataset.json") as f:
    dataset_json = json.load(f)
dataset_vars = {"dataset": indir}
for k, v in dataset_json.items():
Example #20
    def test_set_repeat_flag(self):
        nml = f90nml.Namelist()
        nml.repeat_counter = True
        self.assertTrue(nml.repeat_counter)
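A hedged sketch of what the flag controls: with repeat_counter set, runs of repeated values are written using Fortran repeat tokens (the group, variable, and file names below are illustrative):

nml = f90nml.Namelist({'grp': {'x': [1.0, 1.0, 1.0]}})
nml.repeat_counter = True
nml.write('repeat.nml', force=True)  # written as x = 3*1.0 rather than 1.0, 1.0, 1.0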
Example #21
    'minutes': 0,
    'seconds': 0,
    'dt_atmos': 720,
    'current_date': [1, 1, 1, 0, 0, 0],
    'calendar': 'thirty_day'
})

baseexp.namelist['atmosphere_nml']['idealized_moist_model'] = False  # tells the model to use Newtonian relaxation

baseexp.namelist['hs_forcing_nml'] = f90nml.Namelist({

    't_zero': 315.,    # temperature at reference pressure at equator (default 315K)
    't_strat': 200.,   # stratosphere temperature (default 200K)
    'delh': 60.,       # equator-pole temp gradient (default 60K)
    'delv': 10.,       # lapse rate (default 10K)
    'eps': 0.,         # stratospheric latitudinal variation (default 0K)
    'sigma_b': 0.7,    # boundary layer friction height (default p/ps = sigma = 0.7)

    # negative sign is a flag indicating that the units are days
    'ka':   -40.,      # constant Newtonian cooling timescale (default 40 days)
    'ks':    -4.,      # boundary-layer-dependent cooling timescale (default 4 days)
    'kf':    -1.,      # boundary-layer momentum frictional timescale (default 1 day)

    'do_conserve_energy':   True,  # convert dissipated momentum into heat (default True)
    })

# Let's do a run!
baseexp.runmonth(1, use_restart=False, num_cores=4, light=False)
for i in range(2, 121):
    baseexp.runmonth(i, num_cores=4, light=False)
Example #22
    def setUp(self):
        # Move to test directory if running from setup.py
        if os.path.basename(os.getcwd()) != 'tests':
            os.chdir('tests')

        # Construct the reference namelist values

        self.empty_file = {}

        self.empty_nml = {'empty_nml': {}}

        self.null_nml = {
            'null_nml': {
                'null_value': None
            },
            'null_comma_nml': {
                'null_comma': None
            },
            'null_nocomma_rpt_nml': {
                'null_one': None,
                'null_two': None,
            }
        }

        self.unset_nml = {'unset_nml': {'x': None, 'y': None}}

        self.types_nml = {
            'types_nml': {
                'v_integer': 1,
                'v_float': 1.0,
                'v_complex': 1 + 2j,
                'v_logical': True,
                'v_string': 'Hello',
            }
        }

        self.vector_nml = {
            'vector_nml': {
                'v': [1, 2, 3, 4, 5],
                'v_idx': [1, 2, 3, 4],
                'v_idx_ooo': [1, 2, 3, 4],
                'v_range': [1, 2, 3, 4],
                'v_start_zero': [1, 2, 3, 4],
                'v_start_minusone': [1, 2, 3, 4, 5],
                'v_zero_adj': [1, None, 3, 4],
                'v_zero_adj_ooo': [1, None, 3, 4],
                'v_implicit_start': [1, 2, 3, 4],
                'v_implicit_end': [1, 2, 3, 4],
                'v_implicit_all': [1, 2, 3, 4],
                'v_null_start': [None, 2, 3, 4],
                'v_null_interior': [1, 2, None, 4],
                'v_null_end': [1, 2, 3, None],
                'v_zero': [1, 0, 3],
                'v_stride': [1, None, 3, None, 5, None, 7],
                'v_single': [1],
                'v_implicit_merge': [1, 2],
                'v_explicit_merge': [1, 2],
                'v_complex': [1 + 2j, 3 + 4j, 5 + 6j],
            }
        }

        self.str_array_nml = {
            'str_array_nml': {
                'names': [
                    'abcdefg',
                    'tuvwxyz',
                    'abc123',
                    '123abc',
                    'abcdefg',
                    'abcdefghijklmnopqrstuvwxyz',
                    'xyz321',
                ],
            }
        }

        self.multidim_nml = {
            'multidim_nml': {
                'v2d': [[1, 2], [3, 4]],
                'v3d': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
                'w3d': [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
                        [[13, 14, 15, 16], [17, 18, 19, 20],
                         [21, 22, 23, 24]]],
                'v2d_explicit': [[1, 2], [3, 4]],
                'v2d_outer': [[1], [2], [3], [4]],
                'v2d_inner': [[1, 2, 3, 4]],
                'v2d_sparse': [[1, 2], [None, None], [5, 6]]
            }
        }

        self.multidim_ooo_nml = {
            'multidim_ooo_nml': {
                'a': [[1], [None, 2]],
                'b': [[1], [None, None, 3]],
            }
        }

        self.md_rowmaj_nml = {
            'multidim_nml': {
                'v2d': [[1, 3], [2, 4]],
                'v3d': [[[1, 5], [3, 7]], [[2, 6], [4, 8]]],
                'w3d': [[[1, 13], [5, 17], [9, 21]],
                        [[2, 14], [6, 18], [10, 22]],
                        [[3, 15], [7, 19], [11, 23]],
                        [[4, 16], [8, 20], [12, 24]]],
                'v2d_explicit': [[1, 3], [2, 4]],
                'v2d_outer': [[1, 2, 3, 4]],
                'v2d_inner': [[1], [2], [3], [4]],
                'v2d_sparse': [[1, None, 5], [2, None, 6]]
            }
        }

        self.dense_array_nml = {
            'sparse_array_nml': {
                'x': [
                    [1, None, None],
                    [None, None, None],
                    [None, None, 2],
                ]
            }
        }

        self.sparse_array_nml = {
            'sparse_array_nml': {
                'x': [
                    [1],
                    [],
                    [None, None, 2],
                ]
            }
        }

        self.default_one_index_nml = {
            'default_index_nml': {
                'v': [1, 2, 3, 4, 5]
            }
        }

        self.default_zero_index_nml = {
            'default_index_nml': {
                'v': [1, 2, None, 3, 4, 5]
            }
        }

        self.global_index_nml = {
            'global_index_nml': {
                'v_zero': [1, 2, 3, 4],
                'v_neg': [1, 2, 3, 4],
                'v_pos': [None, 1, 2, 3, 4]
            }
        }

        self.float_nml = {
            'float_nml': {
                'v_float': 1.,
                'v_decimal_start': .1,
                'v_decimal_end': 1.,
                'v_negative': -1.,
                'v_single': 1.,
                'v_double': 1.,
                'v_single_upper': 1.,
                'v_double_upper': 1.,
                'v_positive_index': 10.,
                'v_negative_index': 0.1,
                'v_no_exp_pos': 1.,
                'v_no_exp_neg': 1.,
                'v_no_exp_pos_dot': 1.,
                'v_no_exp_neg_dot': 1.,
                'v_neg_no_exp_pos': -1.,
                'v_neg_no_exp_neg': -1.,
                'v_pos_decimal': 0.01,
                'v_neg_decimal': -0.01,
            }
        }

        self.string_nml = {
            'string_nml': {
                'str_basic': 'hello',
                'str_no_delim': 'hello',
                'str_no_delim_token': '+hello',
                'str_no_delim_no_esc': "a''b",
                'single_esc_delim': "a 'single' delimiter",
                'double_esc_delim': 'a "double" delimiter',
                'double_nested': "''x'' \"y\"",
                'str_list': ['a', 'b', 'c'],
                'slist_no_space': ['a', 'b', 'c'],
                'slist_no_quote': ['a', 'b', 'c'],
                'slash': 'back\\slash',
            }
        }

        self.string_multiline_nml = {
            'string_multiline_nml': {
                'empty': '',
                'trailing_whitespace': '  '
            }
        }

        self.dtype_nml = {
            'dtype_nml': {
                'dt_scalar': {
                    'val': 1
                },
                'dt_stack': {
                    'outer': {
                        'inner': 2
                    }
                },
                'dt_vector': {
                    'vec': [1, 2, 3]
                }
            },
            'dtype_multi_nml': {
                'dt': {
                    'x': 1,
                    'y': 2,
                    'z': 3,
                }
            },
            'dtype_nested_nml': {
                'f': {
                    'g': {
                        'x': 1,
                        'y': 2,
                        'z': 3,
                    }
                }
            },
            'dtype_field_idx_nml': {
                'f': {
                    'x': [1, 2, 3]
                }
            },
            'dtype_vec_nml': {
                'a': {
                    'b': [{
                        'c': 1,
                        'd': 2
                    }, {
                        'c': 3,
                        'd': 4
                    }, {
                        'c': 5,
                        'd': 6
                    }]
                }
            },
            'dtype_sparse_vec_nml': {
                'a': {
                    'b': [{
                        'c': 2
                    }]  # NOTE: start_index is 2
                }
            },
            'dtype_single_value_vec_nml': {
                'a': [{
                    'b': 1
                }]
            },
            'dtype_single_vec_merge_nml': {
                'a': {
                    'b': [{
                        'c': 1,
                        'd': 2
                    }]
                }
            },
            'dtype_list_ooo_noidx': {
                'a': [
                    {
                        'i': 123
                    },
                    {
                        'i': 456
                    },
                    {
                        'i': 789
                    },
                ],
            },
        }

        self.dtype_case_nml = {
            'dtype_mixed': {
                'b': {
                    'c_d_e': [{
                        'id': 1
                    }, {
                        'id': 2
                    }]
                }
            },
            'dtype_list_in_list': {
                'b': {
                    'c': [{
                        'id': 1
                    }, {
                        'id': 2
                    }, {
                        'id': 3
                    }, {
                        'id': 4,
                        'd': {
                            'e': [10, 11]
                        }
                    }]
                }
            },
            'dtype_upper_scalar': {
                'b': {
                    'c': 1,
                    'd': [{
                        'id': 2
                    }],
                }
            },
            'dtype_upper_list': {
                'b': {
                    'c': [{
                        'id': 1
                    }, {
                        'id': 2
                    }]
                }
            },
            'dtype_index_overwrite': {
                'b': {
                    'c': [{
                        'd': 1,
                        'e': 2,
                        'f': 3,
                        'g': 4,
                        'h': 5
                    }]
                }
            },
            'dtype_list_staggered': {
                'b': {
                    'c': [{
                        'a': 1
                    }, None, None, {
                        'a': 1
                    }, None, None, None, {
                        'a': 1
                    }]
                }
            },
        }

        self.bcast_nml = {
            'bcast_nml': {
                'x': [2.0, 2.0],
                'y': [None, None, None],
                'z': [True, True, True, True],
            },
            'bcast_nml_comma': {
                'x': [2.0, 2.0],
                'y': [None, None, None],
                'z': [True, True, True, True],
            },
            'bcast_endnull_nml': {
                'x': [2.0, 2.0],
                'y': [None, None, None],
            },
            'bcast_endnull_nml_comma': {
                'x': [2.0, 2.0],
                'y': [None, None, None],
            },
            'bcast_mixed_nml': {
                'x': [1, 1, 1, 2, 3, 4],
                'y': [1, 1, 1, 2, 2, 3],
            },
            'bcast_mixed_null_nml': {
                'x': [1, None, None, None, 3, 4],
                'y': [1, 1, 1, None, None, None, 3, 4],
                'z': [1, None, None, None, None, 4],
            },
        }

        self.comment_nml = {
            'comment_nml': {
                'v_cmt_inline': 123,
                'v_cmt_in_str': 'This token ! is not a comment',
                'v_cmt_after_str': 'This ! is not a comment',
            }
        }

        self.comment_alt_nml = {'comment_alt_nml': {'x': 1, 'z': 3}}

        # NOTE: Methods for setting up namelists with repeated groups are
        #   still a work in progress.  This is a temporary solution to get
        #   past the issue and focus on other related matters.

        # Old repeat group method
        #
        # self.grp_repeat_nml = {
        #     'grp_repeat_nml': [{'x': 1}, {'x': 2}],
        #     'case_check_nml': [{'y': 1}, {'y': 2}],
        # }

        # Possibly temporary construction of repeated group
        self.grp_repeat_nml = f90nml.Namelist()
        self.grp_repeat_nml['grp_repeat_nml'] = {'x': 1}
        self.grp_repeat_nml.add_cogroup('grp_repeat_nml', {'x': 2})
        self.grp_repeat_nml['case_check_nml'] = {'y': 1}
        self.grp_repeat_nml.add_cogroup('case_check_nml', {'y': 2})

        self.key_repeat_nml = {'key_repeat_nml': {'a': 3}}

        self.f77_nml = {
            'f77_nml': {
                'x': 123
            },
            'next_f77_nml': {
                'y': 'abc'
            },
        }

        self.dollar_nml = {'dollar_nml': {'v': 1.}}

        self.multiline_nml = {
            'multiline_nml': {
                'x': [
                    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1
                ]
            }
        }

        self.long_string_nml = {
            'long_string_nml': {
                's': ' '.join(12 * ['abcdefghijklmnopqrstuvwxyz'])
            }
        }

        self.ext_token_nml = {'ext_token_nml': {'x': 1}}

        self.list_patch_nml = {
            'list_patch_nml': {
                'x': ['1', '2', '3', '4', '5']
            }
        }

        self.repatch_nml = {
            'repatch_nml': {
                'x': [5, 6],
                'y': {
                    'z': 7
                }
            },
            'newgrp_nml': {
                'a': 1,
                'b': 2,
            }
        }

        self.winfmt_nml = {'blah': {'blah': 1}}

        self.extern_cmt_nml = {
            'efitin': {
                'abc': 0,
            }
        }

        self.ieee_nml = {
            'ieee_nml': {
                'base_inf': float('inf'),
                'neg_inf': float('-inf'),
                'plus_inf': float('inf'),
                'base_nan': float('nan'),
                'plus_nan': float('nan'),
                'neg_nan': float('nan'),
            }
        }

        if has_numpy:
            self.numpy_nml = {
                'numpy_nml':
                OrderedDict((
                    ('np_integer', numpy.int64(1)),
                    ('np_float', numpy.float64(1.0)),
                    ('np_complex', numpy.complex128(1 + 2j)),
                ))
            }

        if os.path.isfile('tmp.nml'):
            os.remove('tmp.nml')
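These reference dictionaries are presumably compared against parsed fixture files elsewhere in the suite; a minimal sketch of such a check (the fixture file name is assumed):

    def test_types(self):
        test_nml = f90nml.read('types.nml')
        self.assertEqual(self.types_nml, test_nml.todict())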
Example #23
                # remove comments and additional whitespace
                params = [
                    p.partition(b'!')[0].strip() for p in params if p.strip()
                ]
                for p in params:
                    # search for first reference to parameter in the file
                    # and get the default value

                    #r = br'(\w+).*::\s*' + p + br'\s*=\s*([\w\d\.]+)'
                    if p:
                        r = br'(.*)' + p + br'.*=\s*((?:\(\/[\d\s,+\-.]+\/\))|(?:\'.*\')|(?:\".*\")|[\-+\w\d.]+)'
                        mp = re.search(r, data, re.IGNORECASE)
                        if mp:
                            if '!' in mp.group(1).decode():
                                continue
                            value = mp.group(2).decode()
                            for parse in parsers:
                                try:
                                    value = parse(value)
                                    break
                                except Exception:
                                    # try the next parser; avoid a bare
                                    # except, which would also swallow
                                    # KeyboardInterrupt/SystemExit
                                    continue
                            nml[p.decode()] = value
                        else:
                            nml[p.decode()] = 'UNDEFINED'

#print(namelist_defaults)
n = f90nml.Namelist()
n.update(namelist_defaults)
n.write('defaults.nml')
print('defaults.nml written')
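An optional round-trip check (not part of the original snippet) would confirm that the generated file parses back:

roundtrip = f90nml.read('defaults.nml')
assert roundtrip == n  # exact equality may depend on value formatting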