Example #1
 def assert_write_path(self, nml, target_fname, sort=False):
     tmp_fname = 'tmp.nml'
     f90nml.write(nml, tmp_fname, sort=sort)
     try:
         self.assert_file_equal(tmp_fname, target_fname)
     finally:
         os.remove(tmp_fname)
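The test helpers above exercise f90nml's basic write path. A minimal standalone sketch of that round trip (the group and variable names below are illustrative):

import os

import f90nml

# build a small namelist as a plain dict of groups and write it out
nml = {'config_nml': {'nx': 16, 'dt': 0.5, 'label': 'test'}}
f90nml.write(nml, 'tmp.nml')  # refuses to overwrite an existing file unless force=True

# read it back to confirm the round trip, then clean up
assert f90nml.read('tmp.nml')['config_nml']['nx'] == 16
os.remove('tmp.nml')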
Example #2
def retrieve_gribs_and_run_ungrib(js, q):
    """
    This function retrieves required GRIB files and runs ungrib.

    It returns either 'SUCCESS' or 'FAILURE' on completion.

    :param js: the JobState object containing the forecast configuration
    :param q: the multiprocessing Queue into which we will send either 'SUCCESS' or 'FAILURE'
    """
    try:
        logging.info("retrieving GRIB files.")

        # step 3: retrieve required GRIB files from the grib_source and symlink them as GRIBFILE.XYZ links into wps
        manifest = js.grib_source.retrieve_gribs(js.start_utc, js.end_utc)
        js.grib_source.symlink_gribs(manifest, js.wps_dir)

        send_email(js, 'grib2', 'Job %s - %d GRIB2 files downloaded.' % (js.job_id, len(manifest)))
        logging.info("running UNGRIB")

        # step 4: patch namelist for ungrib and execute ungrib
        f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

        Ungrib(js.wps_dir).execute().check_output()

        send_email(js, 'ungrib', 'Job %s - ungrib complete.' % js.job_id)
        logging.info('UNGRIB complete')
        q.put('SUCCESS')

    except Exception as e:
        logging.error('GRIB2/UNGRIB step failed with exception %s' % repr(e))
        traceback.print_exc()
        q.put('FAILURE')
Example #3
 def assert_write(self, nml, target_fname):
     tmp_fname = "tmp.nml"
     f90nml.write(nml, tmp_fname)
     try:
         self.assert_file_equal(tmp_fname, target_fname)
     finally:
         os.remove(tmp_fname)
Example #4
def write_wxy(wxy, filename):
    # remove any existing file so the new namelist can be written cleanly
    if os.path.isfile(filename):
        print(filename, "exists")
        os.remove(filename)
        print(filename, "deleted")
    f90nml.write(wxy, filename)
    print("wrote new wxy:", filename)
Example #5
 def assert_write_file(self, nml, target_fname, sort=False):
     tmp_fname = 'tmp.nml'
     with open(tmp_fname, 'w') as tmp_file:
         f90nml.write(nml, tmp_file, sort=sort)
         self.assertFalse(tmp_file.closed)
     try:
         self.assert_file_equal(tmp_fname, target_fname)
     finally:
         os.remove(tmp_fname)
Example #6
    def init_config(self):
        """Patch input.nml as a new or restart run."""

        input_fpath = os.path.join(self.work_path, 'input.nml')

        input_nml = f90nml.read(input_fpath)

        input_type = 'n' if self.expt.counter == 0 else 'r'
        input_nml['GOLD_input_nml']['input_filename'] = input_type

        f90nml.write(input_nml, input_fpath, force=True)
Example #7
    def init_config(self):
        """Patch input.nml as a new or restart run."""

        input_fpath = os.path.join(self.work_path, 'input.nml')

        input_nml = f90nml.read(input_fpath)

        if self.expt.counter == 0 or self.expt.repeat_run:
            input_type = 'n'
        else:
            input_type = 'r'
        input_nml['MOM_input_nml']['input_filename'] = input_type

        f90nml.write(input_nml, input_fpath, force=True)
Example #8
    def _write_file(self, outputfile, rawvals, do_patch=False, nml_file=None):
        patch=defaultdict( dict )

        for key,rawval in rawvals.items():
            if rawval is None:  # omit if value is None
                continue
            if isinstance(rawval,numpy.ndarray):
                rawval=list(rawval)  # necessary until f90nml supports numpy arrays
            patch[key[1]][key[0]]=rawval
        
        if do_patch:
            f90nml.patch(nml_file or self._nml_file,patch,outputfile)
        else:
            f90nml.write(patch, outputfile, force=True)      
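The helper above switches between f90nml.patch, which rewrites an existing template file with updated values, and f90nml.write, which emits a fresh namelist. A minimal sketch of the two calls, with hypothetical file names:

import f90nml

patch = {'run_nml': {'n_steps': 200}}

# rewrite an existing template file, updating only the listed values
f90nml.patch('template.nml', patch, 'run_patched.nml')

# or write the dictionary out as a brand-new namelist file
f90nml.write(patch, 'run_fresh.nml', force=True)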
Example #9
def readpar(par_file, root):
    """Read StagYY par file.

    The namelist is populated in chronological order with:

    - :data:`PAR_DEFAULT`, an internal dictionary defining defaults;
    - :data:`PAR_DFLT_FILE`, the global configuration par file;
    - ``par_name_defaultparameters`` if it is defined in ``par_file``;
    - ``par_file`` itself;
    - ``parameters.dat`` if it can be found in the StagYY output directories.

    Args:
        par_file (:class:`pathlib.Path`): path of par file.
        root (:class:`pathlib.Path`): path on which other paths are rooted.
            This is usually par.parent.
    Returns:
        :class:`f90nml.namelist.Namelist`: case insensitive dict of dict of
        values with first key being the namelist and second key the variables'
        name.
    """
    par_nml = deepcopy(PAR_DEFAULT)

    if PAR_DFLT_FILE.is_file():
        _enrich_with_par(par_nml, PAR_DFLT_FILE)
    else:
        PAR_DFLT_FILE.parent.mkdir(exist_ok=True)
        f90nml.write(par_nml, str(PAR_DFLT_FILE))

    if not par_file.is_file():
        raise NoParFileError(par_file)

    par_main = f90nml.read(str(par_file))
    if 'default_parameters_parfile' in par_main:
        par_dflt = par_main['default_parameters_parfile'].get(
            'par_name_defaultparameters', 'par_defaults')
        par_dflt = root / par_dflt
        if not par_dflt.is_file():
            raise NoParFileError(par_dflt)
        _enrich_with_par(par_nml, par_dflt)

    _enrich_with_par(par_nml, par_file)

    par_out = root / par_nml['ioin']['output_file_stem'] / '_parameters.dat'
    if par_out.is_file():
        _enrich_with_par(par_nml, par_out)
    par_out = root / par_nml['ioin']['hdf5_output_folder'] / 'parameters.dat'
    if par_out.is_file():
        _enrich_with_par(par_nml, par_out)
    return par_nml
Example #10
def save_InitCond(dict_initcond, year, grid=''):
    InitCond = {'initialconditions': dict_initcond.copy()}
    for k, v in InitCond['initialconditions'].items():
        if int(v) == v:
            InitCond['initialconditions'][k] = int(v)

    # save RunControl to file
    fn_nml = os.path.join(
        'Input',
        'InitialConditionstest{grid}_{year}.nml').format(
        grid=grid,
        year=year)
    if os.path.exists(fn_nml):
        os.remove(fn_nml)
    f90nml.write(InitCond, fn_nml)
Example #11
    def save(self, filename, convkey=None):
        """パラメータをファイルに保存する.

        Parameters
        ----------
        filename : str or Path
            保存するファイル名
        convkey : UnitConversionKey, optional
            単位変換キー, by default None
        """
        convkey = convkey or self.convkey
        with open(filename, 'wt', encoding='utf-8') as f:
            if convkey is not None:
                f.write('!!key {}\n'.format(convkey.keytext))
            f90nml.write(self.nml, f, force=True)
Example #12
def test_run_failure_confusing_emissions_scenarios(package, invalid_config):
    f90nml.write(invalid_config,
                 join(package.run_dir, "MAGCFG_USER.CFG"),
                 force=True)

    error_msg = re.escape(
        "You have more than one `FILE_EMISSCEN_X` flag set. Using more than one "
        "emissions scenario is hard to debug and unnecessary with Pymagicc's "
        "dataframe scenario input. Please combine all your scenarios into one "
        "dataframe with Pymagicc and pandas, then feed this single Dataframe into "
        "Pymagicc's run API.")
    with pytest.raises(ValueError, match=error_msg):
        package.run()

    assert package.config is None
Example #13
def namelist_set(filename, setvariable, setvalue, verbose=False):
    '''
    Set a variable from a namelist to a value
      input arguments:
        filename: filename of namelist
        setvariable: GROUP_NAME:VARIABLE_NAME to set from namelist
        setvalue: value to set setvariable to
        verbose: optional boolean argument if results should be printed to screen
    '''
    namelist = f90nml.read(filename)
    path = setvariable.split(':')
    crumb = namelist
    while len(path) > 1:
        crumb = crumb[path[0]]
        path.pop(0)
    # dealing with different types..
    t = type(crumb[path[0]])
    # exclude bool explicitly, since bool is a subclass of int in Python
    if isinstance(crumb[path[0]], int) and not isinstance(crumb[path[0]], bool):  # integer
        crumb[path[0]] = int(setvalue)
    elif isinstance(crumb[path[0]], float):  # float
        crumb[path[0]] = float(setvalue)
    elif isinstance(crumb[path[0]], str):  # string
        crumb[path[0]] = setvalue
    elif isinstance(crumb[path[0]], list):  # list
        t = type(crumb[path[0]][0])

        # deal with trailing ',' leading to empty string, crashing int() and float()
        l = setvalue.split(',')
        while l[-1] == "":
            l.pop()

        if isinstance(crumb[path[0]][0], int):  # integer
            crumb[path[0]] = [int(i) for i in l]
        if isinstance(crumb[path[0]][0], float):  # float
            crumb[path[0]] = [float(i) for i in l]
        if isinstance(crumb[path[0]][0], str):  # string
            crumb[path[0]] = l
    elif isinstance(crumb[path[0]], bool):  # boolean
        if setvalue == '.true.':
            crumb[path[0]] = True
        elif setvalue == '.false.':
            crumb[path[0]] = False
        else:
            print "Cannot parse boolean, use .true. or .false."
    else:
        print "Unsupported type: ", t
    # write namelist variable
    f90nml.write(namelist, filename, force=True)
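A hypothetical invocation of the helper above; the namelist file and GROUP:VARIABLE names are illustrative:

# set max_dom in the &share group of namelist.wps to 2
namelist_set('namelist.wps', 'share:max_dom', '2')

# list-valued variables take a comma-separated string; a trailing comma is tolerated
namelist_set('namelist.wps', 'geogrid:parent_grid_ratio', '1,3,3,')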
Example #14
def namelist_set(filename, setvariable, setvalue, verbose=False):
    '''
    Set a variable from a namelist to a value
      input arguments:
        filename: filename of namelist
        setvariable: GROUP_NAME:VARIABLE_NAME to set from namelist
        setvalue: value to set setvariable to
        verbose: optional boolean argument if results should be printed to screen
    '''
    namelist = f90nml.read( filename )
    path = setvariable.split ( ':' )
    crumb = namelist
    while len(path) > 1:
        crumb = crumb[ path[0] ]
        path.pop(0)
    # dealing with different types..
    t = type(crumb[path[0]])
    # exclude bool explicitly, since bool is a subclass of int in Python
    if isinstance(crumb[path[0]], int) and not isinstance(crumb[path[0]], bool):  # integer
        crumb[ path[0] ] = int(setvalue)
    elif isinstance(crumb[path[0]], float):  # float
        crumb[ path[0] ] = float(setvalue)
    elif isinstance(crumb[path[0]], str):  # string
        crumb[ path[0] ] = setvalue
    elif isinstance(crumb[path[0]], list):  # list
        t = type( crumb[path[0]][0] )

        # deal with trailing ',' leading to empty string, crashing int() and float()
        l = setvalue.split(',')
        while l[-1] == "":
            l.pop()

        if isinstance(crumb[path[0]][0], int):  # integer
            crumb[ path[0] ] = [int(i) for i in l]
        if isinstance(crumb[path[0]][0], float):  # float
            crumb[ path[0] ] = [float(i) for i in l]
        if isinstance(crumb[path[0]][0], str):  # string
            crumb[ path[0] ] = l
    elif isinstance(crumb[path[0]], bool):  # boolean
        if setvalue == '.true.':
            crumb[ path[0] ] = True
        elif setvalue == '.false.':
            crumb[ path[0] ] = False
        else:
            print "Cannot parse boolean, use .true. or .false."
    else:
        print "Unsupported type: ", t
    # write namelist variable
    f90nml.write( namelist, filename, force=True )
Example #15
 def saveNamelist(self, filename):
     '''
     Write a namelist file to the requested path based on the current content of this object
     :param filename: the path of the file to write
     :return: None
     '''
     a = {'input_nml': {}}
     a['input_nml']['all_qf'] = self.all_qf
     a['input_nml']['sensible_qf'] = self.sensible_qf
     a['input_nml']['latent_qf'] = self.latent_qf
     a['input_nml']['wastewater_qf'] = self.wastewater_qf
     # a['input_nml']['start_dates'] = self.dt_start.strftime('YYYY-mm-dd')
     # a['input_nml']['end_dates'] = self.dt_start.strftime('YYYY-mm-dd')
     a['input_nml']['start_dates'] = self.dt_start.strftime('%Y-%m-%d')
     a['input_nml']['end_dates'] = self.dt_start.strftime('%Y-%m-%d')
     nml.write(a, filename)
Example #16
def _inject_pymagicc_compatible_magcfg_user(magicc):
    """
    Overwrite ``magicc.run_dir / MAGCFG_USER.CFG`` with config that only points to ``MAGTUNE_PYMAGICC.CFG``

    Parameters
    ----------
    magicc : :obj:`pymagicc.MAGICC7`
        Instance of :obj:`pymagicc.MAGICC7` to setup
    """
    LOGGER.info("Writing Pymagicc compatible MAGCFG_USER.CFG in %s",
                magicc.run_dir)
    with open(os.path.join(magicc.run_dir, "MAGCFG_USER.CFG"),
              "w") as file_handle:
        f90nml.write({"nml_allcfgs": {
            "file_tuningmodel_1": "PYMAGICC"
        }}, file_handle)
Example #17
    def generate_parfiles(self):
        """
        Creates separate parfiles from the main namelist container and writes
        the individual parfiles to disk.

        Returns
        -------
        parfiles : list of str
            List containing the paths of the parfiles, can be passed to the legolas
            runner.

        """
        run_dict = {key: {} for key in self.container.keys()}
        # savelist must be present
        try:
            run_dict["savelist"]
        except KeyError:
            run_dict.update({"savelist": {}})

        for current_run in range(self.nb_runs):
            prefix = "{:04d}".format(current_run + 1)
            if self.nb_runs == 1:
                prefix = ""
            # generate dictionary for this specific run
            for namelist, items in self.container.items():
                for key, values in items.items():
                    run_dict[namelist].update({key: values[current_run]})
            # parfile name
            parfile_name = f"{prefix}{self.basename}.par"
            # datfile name (no extension .dat needed)
            datfile_name = "".join([
                f"{prefix}",
                run_dict["savelist"].get("basename_datfile", self.basename),
            ])
            run_dict["savelist"].update({"basename_datfile": datfile_name})
            # logfile name (no extension .log needed)
            logfile_name = run_dict["savelist"].get("basename_logfile", None)
            if logfile_name is not None:
                logfile_name = f"{prefix}{logfile_name}"
                run_dict["savelist"].update({"basename_logfile": logfile_name})

            # set paths and write parfile
            parfile_path = (self.output_dir / parfile_name).resolve()
            self.parfiles.append(str(parfile_path))
            f90nml.write(run_dict, parfile_path, force=True)
        pylboLogger.info(f"parfiles generated and saved to {self.output_dir}")
        return self.parfiles
Example #18
    def perform(self, args):

        namelist = args.namelist["_"]

        parser = f90nml.Parser()

        if os.path.exists(namelist):
            nml = parser.read(namelist)

        else:
            nml = parser.reads(namelist)

        if args.outfile:

            f90nml.write(nml, args.outfile["_"])

        self.add_forward(data=nml.todict(complex_tuple=True))
Example #19
def nmlwrite(nml, **kwargs):

    output_fmt = kwargs.get("format")

    _out = kwargs.get("_out", sys.stdout)
    # determine output format
    valid_formats = ('json', 'yaml', 'nml')
    if output_fmt and output_fmt not in valid_formats:
        raise ValueError(
            "Error: format must be one of: {0}".format(valid_formats))

    if _out != sys.stdout:
        output_file = open(_out, 'w')
        output_fname = _out
    else:
        output_file = _out
        output_fname = None

    # Get output format from out_file name
    if not output_fmt:
        if output_fname:
            _, output_ext = os.path.splitext(output_fname)
            if output_ext == '.json':
                output_fmt = 'json'
            elif output_ext in ('.yaml', '.yml'):
                output_fmt = 'yaml'
            else:
                output_fmt = 'nml'
        else:
            output_fmt = 'nml'

    # write nml to output_file
    if output_fmt == "json":
        json.dump(nml, output_file, indent=4, separators=(',', ': '))
        output_file.write('\n')
    elif output_fmt == "yaml":
        input_data = nml.todict(complex_tuple=True)
        yaml.dump(input_data, output_file, default_flow_style=False)
    else:
        f90nml.write(nml, output_file)

    # Cleanup
    if _out != sys.stdout:
        output_file.close()
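The converter above picks the output format from the destination's extension when no explicit format is given. A hypothetical use, assuming a namelist already parsed with f90nml.read:

import f90nml

nml = f90nml.read('config.nml')
nmlwrite(nml, _out='config.json')   # .json extension selects JSON output
nmlwrite(nml, _out='config.yaml')   # .yaml / .yml selects YAML
nmlwrite(nml)                       # no _out given: namelist text goes to stdout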
Example #20
    def set_config(self,
                   filename='MAGTUNE_SIMPLE.CFG',
                   top_level_key='nml_allcfgs',
                   **kwargs):
        """
        Create a configuration file for MAGICC

        Writes a fortran namelist in run_dir.
        :param filename:
        :param top_level_key:
        :param kwargs: Other parameters to pass to the configuration file. No
            validation on the parameters is performed.
        :return: A dict containing the contents of the namelist which was
            written to file
        """
        fname = join(self.run_dir, filename)
        data = {top_level_key: kwargs}
        f90nml.write(data, fname, force=True)

        return data
Example #21
    def write_namelists(self, run_dir):

        # TODO(JLM): make the setup namelists @properties without setter (protect them)
        # write hydro.namelist for the job
        self.hydro_namelist_file = run_dir.joinpath(self.job_date_id +
                                                    '.hydro.namelist')
        f90nml.write(self.hydro_namelist, self.hydro_namelist_file)
        nlst_file = run_dir.joinpath('hydro.namelist')
        if nlst_file.exists():
            nlst_file.unlink()
        nlst_file.symlink_to(self.hydro_namelist_file)

        # write namelist.hrldas
        self.namelist_hrldas_file = run_dir.joinpath(self.job_date_id +
                                                     '.namelist.hrldas')
        f90nml.write(self.namelist_hrldas, self.namelist_hrldas_file)
        nlst_file = run_dir.joinpath('namelist.hrldas')
        if nlst_file.exists():
            nlst_file.unlink()
        nlst_file.symlink_to(self.namelist_hrldas_file)
Example #22
def save_initcond_nml(
    df_state: pd.DataFrame, site: str = "", path_dir_save: Path = Path("."),
) -> Path:
    # get last time step
    try:
        tstep_last = df_state.index.levels[0].max()
    except AttributeError:
        logger_supy.exception(
            (
                "incorrect structure detected;"
                + " check if `df_state` is the final model state."
            )
        )
        return

    # get year for filename formatting
    year_last = tstep_last.year
    # generate a df with records of the last tstep
    df_state_last_tstep = df_state.loc[tstep_last]
    # get grid list
    list_grid = df_state_last_tstep.index

    # list holder for paths written out in nml
    list_path_nml = []
    for grid in list_grid:
        # generate nml filename
        filename_out_grid = f"InitialConditions{site}{grid}_{year_last}_EndofRun.nml"
        # derive a save path
        path_nml = path_dir_save / filename_out_grid
        # retrieve initcond values from `df_state_last_tstep`
        nml = {
            "InitialConditions": {
                key: df_state_last_tstep.loc[grid, var]
                for key, var in dict_init_nml.items()
            }
        }
        # save nml
        f90nml.write(nml, path_nml, force=True)
        # f90nml.write(nml, nml_file,force=True)
        list_path_nml.append(path_nml)
    return list_path_nml
Example #23
 def create_constants(constants_yaml, output_path):
     """Method to just create a constants file, bypassing
     all the NetCDF setup."""
     yaml = YAML(typ='safe')
     # If no output_path provided, create in same place and same name as YAML
     if output_path is None:
         output_path = f'{os.path.splitext(constants_yaml)[0]}.nml'
     # Open the YAML constants file and read contents
     with open(constants_yaml, 'r') as constants_file:
         constants = yaml.load(constants_file)
         # Open the NML constants file and write
         with open(output_path, 'w') as nml_file:
             allocatable_array_sizes = {}
             for grp in constants.values():
                 for k, v in grp.items():
                     if isinstance(v, list):
                         allocatable_array_sizes['n_{0}'.format(k)] = len(v)
             f90nml.write(
                 {'allocatable_array_sizes': allocatable_array_sizes},
                 nml_file)
             f90nml.write(constants, nml_file)
     print('Done! Constants file saved to ' + output_path)
Example #24
    def update_config(self,
                      filename="MAGTUNE_PYMAGICC.CFG",
                      top_level_key="nml_allcfgs",
                      **kwargs):
        """Updates a configuration file for MAGICC

        Updates the contents of a fortran namelist in the run directory,
        creating a new namelist if none exists.

        Parameters
        ----------
        filename : str
            Name of configuration file to write

        top_level_key : str
            Name of namelist to be written in the
            configuration file

        kwargs
            Other parameters to pass to the configuration file. No
            validation on the parameters is performed.

        Returns
        -------
        dict
            The contents of the namelist which was written to file
        """
        kwargs = self._fix_any_backwards_emissions_scen_key_in_config(kwargs)

        fname = join(self.run_dir, filename)

        if exists(fname):
            conf = f90nml.read(fname)
        else:
            conf = {top_level_key: {}}
        conf[top_level_key].update(kwargs)
        f90nml.write(conf, fname, force=True)

        return conf
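A hypothetical use of the method above; the magicc instance and the keyword arguments are illustrative, and each keyword is written verbatim into the nml_allcfgs group:

# the first call creates MAGTUNE_PYMAGICC.CFG in run_dir; later calls merge into it
magicc.update_config(core_climatesensitivity=3.0)
magicc.update_config(rf_total_constantafteryr=2250)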
Example #25
def write_config(p):
    emis_key = "file_emissionscenario" if p.version == 6 else "FILE_EMISSCEN"
    outpath = join(p.run_dir, "MAGTUNE_SIMPLE.CFG")
    f90nml.write({"nml_allcfgs": {
        emis_key: "RCP26.SCEN"
    }},
                 outpath,
                 force=True)

    # Write years config.
    outpath_years = join(p.run_dir, "MAGCFG_NMLYEARS.CFG")
    f90nml.write(
        {
            "nml_years": {
                "startyear": 1765,
                "endyear": 2100,
                "stepsperyear": 12
            }
        },
        outpath_years,
        force=True,
    )
Example #26
    def set_config(
        self, filename="MAGTUNE_PYMAGICC.CFG", top_level_key="nml_allcfgs", **kwargs
    ):
        """
        Create a configuration file for MAGICC

        Writes a fortran namelist in run_dir.

        # Parameters
        filename (str): Name of configuration file to write
        top_level_key (str): Name of namelist to be written in the
            configuration file
        kwargs: Other parameters to pass to the configuration file. No
            validation on the parameters is performed.

        # Returns
        data (dict): The contents of the namelist which was written to file
        """
        fname = join(self.run_dir, filename)
        data = {top_level_key: kwargs}
        f90nml.write(data, fname, force=True)

        return data
Example #27
    def write_namelist_parameters(self,
                                  outputfile,
                                  do_patch=False,
                                  nml_file=None):
        patch = defaultdict(dict)
        for p in self._namelist_parameters.values():
            name = p["name"]
            group_name = p["group_name"]
            group = patch[group_name]
            short = p["short"]
            parameter_set_name = p.get("set_name", "parameters_" + group_name)
            parameter_set = getattr(self, parameter_set_name)
            if getattr(parameter_set, name) is None:  # omit if value is None
                continue
            if is_quantity(p["default"]):
                group[short] = to_quantity(getattr(
                    parameter_set, name)).value_in(p["default"].unit)
            else:
                group[short] = getattr(parameter_set, name)

        if do_patch:
            f90nml.patch(nml_file or self._nml_file, patch, outputfile)
        else:
            f90nml.write(patch, outputfile, force=True)
Example #28
def skim_namelist_copy(input_path, output_path, *, date, prefix, hours=18):
    old_dic = f90nml.read(os.path.join(input_path, 'namelist.wps'))

    dt_object = pd.to_datetime(date)
    d_init = dt_object.strftime('%Y-%m-%d_%T')
    d_end = dt_object + pd.DateOffset(hours=hours)
    d_end = d_end.strftime('%Y-%m-%d_%T')

    old_dic['share']['start_date'] = old_dic['share']['max_dom'] * [d_init]
    old_dic['share']['end_date'] = old_dic['share']['max_dom'] * [d_end]
    old_dic['share']['interval_seconds'] = 6 * 3600
    old_dic['ungrib']['prefix'] = prefix
    sections = ['share', 'ungrib']
    drops = {}
    new_dic = OrderedDict()
    for s in sections:
        new_dic[s] = old_dic[s]
        if s in drops.keys():
            for d in drops[s]:
                new_dic[s].pop(d)
    f90nml.write(new_dic,
                 os.path.join(output_path, 'namelist.wps'),
                 force=True)
    return new_dic
Example #29
def retrieve_gribs_and_run_ungrib(js, q):
    """
    This function retrieves required GRIB files and runs ungrib.

    It returns either 'SUCCESS' or 'FAILURE' on completion.

    :param js: the JobState object containing the forecast configuration
    :param q: the multiprocessing Queue into which we will send either 'SUCCESS' or 'FAILURE'
    """
    try:
        logging.info("retrieving GRIB files.")

        # step 3: retrieve required GRIB files from the grib_source and symlink them as GRIBFILE.XYZ links into wps
        manifest = js.grib_source.retrieve_gribs(js.start_utc, js.end_utc)
        js.grib_source.symlink_gribs(manifest, js.wps_dir)

        send_email(
            js, 'grib2',
            'Job %s - %d GRIB2 files downloaded.' % (js.job_id, len(manifest)))
        logging.info("running UNGRIB")

        # step 4: patch namelist for ungrib and execute ungrib
        f90nml.write(js.wps_nml,
                     osp.join(js.wps_dir, 'namelist.wps'),
                     force=True)

        Ungrib(js.wps_dir).execute().check_output()

        send_email(js, 'ungrib', 'Job %s - ungrib complete.' % js.job_id)
        logging.info('UNGRIB complete')
        q.put('SUCCESS')

    except Exception as e:
        logging.error('GRIB2/UNGRIB step failed with exception %s' % repr(e))
        traceback.print_exc()
        q.put('FAILURE')
Example #30
 def save_prm(self, path):
     f90nml.write(self.get_prm(), path)
Example #31
def execute(args,job_args):
    """
    Executes a weather/fire simulation.

    :param args: a dictionary with all of the following keys needed to start the simulation
    :param job_args: the original JSON given to the forecast

    Keys in args:
    :param grid_code: the (unique) code of the grid that is used
    :param sys_install_path: system installation directory
    :param start_utc: start time of simulation in UTC
    :param end_utc: end time of simulation in UTC
    :param workspace_path: workspace directory
    :param wps_install_path: installation directory of WPS that will be used
    :param wrf_install_path: installation directory of WRF that will be used
    :param grib_source: a string identifying a valid GRIB2 source
    :param wps_namelist_path: the path to the namelist.wps file that will be used as template
    :param wrf_namelist_path: the path to the namelist.input file that will be used as template
    :param fire_namelist_path: the path to the namelist.fire file that will be used as template
    :param wps_geog_path: the path to the geogrid data directory providing terrain/fuel data
    :param email_notification: dictionary containing keys address and events indicating when a mail should be fired off
 
    
    """

    # step 0 initialize the job state from the arguments
    js = JobState(args)

    jobdir = osp.abspath(osp.join(js.workspace_path, js.job_id))
    make_clean_dir(jobdir)

    json.dump(job_args, open(osp.join(jobdir,'input.json'),'w'), indent=4, separators=(',', ': '))
    jsub = make_job_file(js)
    json.dump(jsub, open(jsub.jobfile,'w'), indent=4, separators=(',', ': '))
 
    logging.info("job %s starting [%d hours to forecast]." % (js.job_id, js.fc_hrs))
    sys.stdout.flush()
    send_email(js, 'start', 'Job %s started.' % js.job_id)

    # read in all namelists
    js.wps_nml = f90nml.read(js.args['wps_namelist_path'])
    js.wrf_nml = f90nml.read(js.args['wrf_namelist_path'])
    js.fire_nml = f90nml.read(js.args['fire_namelist_path'])
    js.ems_nml = None
    if 'emissions_namelist_path' in js.args:
        js.ems_nml = f90nml.read(js.args['emissions_namelist_path'])
    
    # Parse and setup the domain configuration
    js.domain_conf = WPSDomainConf(js.domains)

    num_doms = len(js.domain_conf)
    js.wps_nml['share']['start_date'] = [utc_to_esmf(js.start_utc)] * num_doms
    js.wps_nml['share']['end_date'] = [utc_to_esmf(js.end_utc)] * num_doms
    js.wps_nml['share']['interval_seconds'] = 3600

    logging.info("number of domains defined is %d." % num_doms)

    # build directories in workspace
    js.wps_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wps'))
    js.wrf_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wrf'))

    #check_obj(args,'args')
    #check_obj(js,'Initial job state')

    # step 1: clone WPS and WRF directories
    logging.info("cloning WPS into %s" % js.wps_dir)
    cln = WRFCloner(js.args)
    cln.clone_wps(js.wps_dir, js.grib_source.vtables(), [])

    # step 2: process domain information and patch namelist for geogrid
    js.wps_nml['geogrid']['geog_data_path'] = js.args['wps_geog_path']
    js.domain_conf.prepare_for_geogrid(js.wps_nml, js.wrf_nml, js.wrfxpy_dir, js.wps_dir)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    # do steps 2 & 3 & 4 in parallel (two execution streams)
    #  -> GEOGRID ->
    #  -> GRIB2 download ->  UNGRIB ->

    proc_q = Queue()
    geogrid_proc = Process(target=run_geogrid, args=(js, proc_q))
    grib_proc = Process(target=retrieve_gribs_and_run_ungrib, args=(js, proc_q))


    logging.info('starting GEOGRID and GRIB2/UNGRIB')
    geogrid_proc.start()
    grib_proc.start()

    # wait until both tasks are done
    logging.info('waiting until both tasks are done')
    grib_proc.join()
    geogrid_proc.join()

    if proc_q.get() != 'SUCCESS':
        return

    if proc_q.get() != 'SUCCESS':
        return

    proc_q.close()

    # step 5: execute metgrid after ensuring all grids will be processed
    js.domain_conf.prepare_for_metgrid(js.wps_nml)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    logging.info("running METGRID")
    Metgrid(js.wps_dir).execute().check_output()

    send_email(js, 'metgrid', 'Job %s - metgrid complete.' % js.job_id)
    logging.info("cloning WRF into %s" % js.wrf_dir)

    # step 6: clone wrf directory, symlink all met_em* files, make namelists
    cln.clone_wrf(js.wrf_dir, [])
    symlink_matching_files(js.wrf_dir, js.wps_dir, "met_em*")
    time_ctrl = update_time_control(js.start_utc, js.end_utc, num_doms)
    js.wrf_nml['time_control'].update(time_ctrl)
    update_namelist(js.wrf_nml, js.grib_source.namelist_keys())
    if 'ignitions' in js.args:
        update_namelist(js.wrf_nml, render_ignitions(js, num_doms))

    # if we have an emissions namelist, automatically turn on the tracers
    if js.ems_nml is not None:
        logging.debug('namelist.fire_emissions given, turning on tracers')
        f90nml.write(js.ems_nml, osp.join(js.wrf_dir, 'namelist.fire_emissions'), force=True)
        js.wrf_nml['dynamics']['tracer_opt'] = [2] * num_doms

    f90nml.write(js.wrf_nml, osp.join(js.wrf_dir, 'namelist.input'), force=True)

    f90nml.write(js.fire_nml, osp.join(js.wrf_dir, 'namelist.fire'), force=True)

    # step 7: execute real.exe
    
    logging.info("running REAL")
    # try to run Real twice as it sometimes fails the first time
    # it's not clear why this error happens 
    try:
        Real(js.wrf_dir).execute().check_output()
    except Exception as e:
        logging.error('Real step failed with exception %s, retrying ...' % str(e))
        Real(js.wrf_dir).execute().check_output()
    

    # step 7b: if requested, do fuel moisture DA
    if js.fmda is not None:
        logging.info('running fuel moisture data assimilation')
        for dom in js.fmda.domains:
            assimilate_fm10_observations(osp.join(js.wrf_dir, 'wrfinput_d%02d' % dom), None, js.fmda.token)

    # step 8: execute wrf.exe on parallel backend
    logging.info('submitting WRF job')
    send_email(js, 'wrf_submit', 'Job %s - wrf job submitted.' % js.job_id)

    js.task_id = "sim-" + js.grid_code + "-" + utc_to_esmf(js.start_utc)[:10]
    jsub.job_num=WRF(js.wrf_dir, js.qsys).submit(js.task_id, js.num_nodes, js.ppn, js.wall_time_hrs)

    send_email(js, 'wrf_exec', 'Job %s - wrf job starting now with id %s.' % (js.job_id, js.task_id))
    logging.info("WRF job %s submitted with id %s, waiting for rsl.error.0000" % (jsub.job_num, js.task_id))
  
    jobfile = osp.abspath(osp.join(js.workspace_path, js.job_id,'job.json'))
    json.dump(jsub, open(jobfile,'w'), indent=4, separators=(',', ': '))

    process_output(js.job_id)
Example #32
def retrieve_gribs_and_run_ungrib(js, grib_source, q):
    """
    This function retrieves required GRIB files and runs ungrib.

    It returns either 'SUCCESS' or 'FAILURE' on completion.

    :param js: the JobState object containing the forecast configuration
    :param grib_source: the GribSource object containing ungrib configuration
    :param q: the multiprocessing Queue into which we will send either 'SUCCESS' or 'FAILURE'
    """
    wps_dir = osp.abspath(js.wps_dir)
    grib_dir = osp.join(wps_dir, grib_source.id)
    make_clean_dir(grib_dir)
    wps_nml = js.wps_nml
    try:
        logging.info("retrieving GRIB files from %s" % grib_source.id)

        download_whole_cycle = js.get('download_whole_cycle', False)
        manifest = grib_source.retrieve_gribs(js.start_utc, js.end_utc,
                                              js.ref_utc, js.cycle_start_utc,
                                              download_whole_cycle)
        # logging.info('manifest: ' + str(manifest))

        cache_colmet = len(manifest) > 1
        have_all_colmet = False
        if cache_colmet:
            have_all_colmet = len(manifest.colmet_missing) == 0
            colmet_dir = osp.join(grib_source.cache_dir,
                                  manifest.colmet_prefix)

        logging.info('cache colmet %s, have all colmet %s' %
                     (cache_colmet, have_all_colmet))

        if not have_all_colmet:
            # this is also if we do not cache
            grib_source.symlink_gribs(manifest.grib_files, grib_dir)

            send_email(
                js, 'grib2', 'Job %s - %d GRIB2 files downloaded.' %
                (js.job_id, len(manifest)))
            logging.info("running UNGRIB for %s" % grib_source.id)

            logging.info(
                "step 4: patch namelist for ungrib end execute ungrib on %s files"
                % grib_source.id)

            update_namelist(wps_nml, grib_source.namelist_wps_keys())
            if cache_colmet:
                wps_nml['share']['start_date'] = [
                    utc_to_esmf(manifest.colmet_files_utc[0])
                ] * js.num_doms
                wps_nml['share']['end_date'] = [
                    utc_to_esmf(manifest.colmet_files_utc[-1])
                ] * js.num_doms

            # logging.info("namelist.wps for UNGRIB: %s" % json.dumps(wps_nml, indent=4, separators=(',', ': ')))
            f90nml.write(wps_nml,
                         osp.join(grib_dir, 'namelist.wps'),
                         force=True)
            grib_source.clone_vtables(grib_dir)
            symlink_unless_exists(osp.join(wps_dir, 'ungrib.exe'),
                                  osp.join(grib_dir, 'ungrib.exe'))

            print(grib_dir + ':')
            os.system('ls -l %s' % grib_dir)

            Ungrib(grib_dir).execute().check_output()

            print(grib_dir + ':')
            os.system('ls -l %s' % grib_dir)

            if cache_colmet:
                # move output to cache directory
                make_dir(colmet_dir)
                for f in manifest.colmet_files:
                    move(osp.join(grib_dir, f), osp.join(colmet_dir, f))
                # now all colmet files should be in the cache

        if cache_colmet:
            for f in manifest.colmet_files:
                symlink_unless_exists(osp.join(colmet_dir, f),
                                      osp.join(wps_dir, f))
        else:
            # move output
            for f in glob.glob(osp.join(grib_dir, grib_source.prefix() + '*')):
                move(f, wps_dir)

        send_email(js, 'ungrib', 'Job %s - ungrib complete.' % js.job_id)
        logging.info('UNGRIB complete for %s' % grib_source.id)
        q.put('SUCCESS')

    except Exception as e:
        logging.error('GRIB2/UNGRIB step failed with exception %s' % repr(e))
        traceback.print_exc()
        q.put('FAILURE')
Example #33
def execute(args, job_args):
    """
    Executes a weather/fire simulation.

    :param args: a dictionary with all of the following keys needed to start the simulation
    :param job_args: the original JSON given to the forecast

    Keys in args:
    :param grid_code: the (unique) code of the grid that is used
    :param sys_install_path: system installation directory
    :param start_utc: start time of simulation in UTC
    :param end_utc: end time of simulation in UTC
    :param workspace_path: workspace directory
    :param wps_install_path: installation directory of WPS that will be used
    :param wrf_install_path: installation directory of WRF that will be used
    :param grib_source: a string identifying a valid GRIB2 source
    :param wps_namelist_path: the path to the namelist.wps file that will be used as template
    :param wrf_namelist_path: the path to the namelist.input file that will be used as template
    :param fire_namelist_path: the path to the namelist.fire file that will be used as template
    :param wps_geog_path: the path to the geogrid data directory providing terrain/fuel data
    :param email_notification: dictionary containing keys address and events indicating when a mail should be fired off
 
    
    """

    logging.info('step 0 initialize the job state from the arguments')
    ## logging.info('args = %s' % json.dumps(jargs, open(osp.join(jobdir,'input.json'),'w'), indent=4, separators=(',', ': ')))
    js = JobState(args)
    ## logging.info('js = %s' % json.dumps(js, open(osp.join(jobdir,'input.json'),'w'), indent=4, separators=(',', ': ')))

    jobdir = osp.abspath(osp.join(js.workspace_path, js.job_id))
    make_clean_dir(jobdir)

    json.dump(job_args,
              open(osp.join(jobdir, 'input.json'), 'w'),
              indent=4,
              separators=(',', ': '))
    jsub = make_job_file(js)
    json.dump(jsub, open(jsub.jobfile, 'w'), indent=4, separators=(',', ': '))

    logging.info("job %s starting [%d hours to forecast]." %
                 (js.job_id, js.fc_hrs))
    sys.stdout.flush()
    send_email(js, 'start', 'Job %s started.' % js.job_id)

    # read in all namelists
    js.wps_nml = read_namelist(js.args['wps_namelist_path'])
    js.wrf_nml = read_namelist(js.args['wrf_namelist_path'])
    js.fire_nml = read_namelist(js.args['fire_namelist_path'])
    js.ems_nml = None
    if 'emissions_namelist_path' in js.args:
        js.ems_nml = read_namelist(js.args['emissions_namelist_path'])

    # Parse and setup the domain configuration
    js.domain_conf = WPSDomainConf(js.domains)

    js.num_doms = len(js.domain_conf)
    js.wps_nml['share']['interval_seconds'] = js.grib_source[
        0].interval_seconds

    logging.info("number of domains defined is %d." % js.num_doms)

    # build directories in workspace
    js.wps_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wps'))
    js.wrf_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wrf'))

    #check_obj(args,'args')
    #check_obj(js,'Initial job state')

    logging.info("step 1: clone WPS and WRF directories")
    logging.info("cloning WPS into %s" % js.wps_dir)
    cln = WRFCloner(js.args)
    cln.clone_wps(js.wps_dir, [])
    js.grib_source[0].clone_vtables(js.wps_dir)

    logging.info(
        "step 2: process domain information and patch namelist for geogrid")
    js.wps_nml['share']['start_date'] = [utc_to_esmf(js.start_utc)
                                         ] * js.num_doms
    js.wps_nml['share']['end_date'] = [utc_to_esmf(js.end_utc)] * js.num_doms
    js.wps_nml['geogrid']['geog_data_path'] = js.args['wps_geog_path']
    js.domain_conf.prepare_for_geogrid(js.wps_nml, js.wrf_nml, js.wrfxpy_dir,
                                       js.wps_dir)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    # do steps 2 & 3 & 4 in parallel (two execution streams)
    #  -> GEOGRID ->
    #  -> GRIB2 download ->  UNGRIB ->

    proc_q = Queue()

    geogrid_proc = Process(target=run_geogrid, args=(js, proc_q))
    # grib_proc = Process(target=retrieve_gribs_and_run_ungrib_all, args=(js, proc_q, ref_utc))
    grib_proc = {}
    for grib_source in js.grib_source:
        grib_proc[grib_source.id] = Process(
            target=retrieve_gribs_and_run_ungrib,
            args=(js, grib_source, proc_q))

    logging.info('starting GEOGRID and GRIB2/UNGRIB')

    if js.ungrib_only:
        logging.info(
            'ungrib_only set, skipping GEOGRID, will exit after UNGRIB')
    else:
        geogrid_proc.start()

    for grib_source in js.grib_source:
        grib_proc[grib_source.id].start()

    # wait until all tasks are done
    logging.info('waiting until all tasks are done')

    for grib_source in js.grib_source:
        grib_proc[grib_source.id].join()

    if js.ungrib_only:
        return
    else:
        geogrid_proc.join()

    for grib_source in js.grib_source:
        if proc_q.get() != 'SUCCESS':
            return

    if proc_q.get() != 'SUCCESS':
        return

    proc_q.close()

    logging.info(
        "step 5: execute metgrid after ensuring all grids will be processed")
    update_namelist(js.wps_nml, js.grib_source[0].namelist_wps_keys())
    js.domain_conf.prepare_for_metgrid(js.wps_nml)
    logging.info("namelist.wps for METGRID: %s" %
                 json.dumps(js.wps_nml, indent=4, separators=(',', ': ')))
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    logging.info("running METGRID")
    Metgrid(js.wps_dir).execute().check_output()

    send_email(js, 'metgrid', 'Job %s - metgrid complete.' % js.job_id)
    logging.info("METGRID complete")

    logging.info("cloning WRF into %s" % js.wrf_dir)

    logging.info(
        "step 6: clone wrf directory, symlink all met_em* files, make namelists"
    )
    cln.clone_wrf(js.wrf_dir, [])
    symlink_matching_files(js.wrf_dir, js.wps_dir, "met_em*")
    time_ctrl = update_time_control(js.start_utc, js.end_utc, js.num_doms)
    js.wrf_nml['time_control'].update(time_ctrl)
    js.wrf_nml['time_control']['interval_seconds'] = js.grib_source[
        0].interval_seconds
    update_namelist(js.wrf_nml, js.grib_source[0].namelist_keys())
    if 'ignitions' in js.args:
        update_namelist(js.wrf_nml, render_ignitions(js, js.num_doms))

    # if we have an emissions namelist, automatically turn on the tracers
    if js.ems_nml is not None:
        logging.debug('namelist.fire_emissions given, turning on tracers')
        f90nml.write(js.ems_nml,
                     osp.join(js.wrf_dir, 'namelist.fire_emissions'),
                     force=True)
        js.wrf_nml['dynamics']['tracer_opt'] = [2] * js.num_doms

    f90nml.write(js.wrf_nml,
                 osp.join(js.wrf_dir, 'namelist.input'),
                 force=True)

    f90nml.write(js.fire_nml,
                 osp.join(js.wrf_dir, 'namelist.fire'),
                 force=True)

    # step 7: execute real.exe

    logging.info("running REAL")
    # try to run Real twice as it sometimes fails the first time
    # it's not clear why this error happens
    try:
        Real(js.wrf_dir).execute().check_output()
    except Exception as e:
        logging.error('Real step failed with exception %s, retrying ...' %
                      str(e))
        Real(js.wrf_dir).execute().check_output()

    logging.info('step 7b: if requested, do fuel moisture DA')
    logging.info('fmda = %s' % js.fmda)
    if js.fmda is not None:
        logging.info('running fuel moisture data assimilation')
        for dom in js.fmda.domains:
            logging.info('assimilate_fm10_observations for domain %s' % dom)
            assimilate_fm10_observations(
                osp.join(js.wrf_dir, 'wrfinput_d%02d' % int(dom)), None,
                js.fmda.token)

    # step 8: execute wrf.exe on parallel backend
    logging.info('submitting WRF job')
    send_email(js, 'wrf_submit', 'Job %s - wrf job submitted.' % js.job_id)

    js.task_id = "sim-" + js.grid_code + "-" + utc_to_esmf(js.start_utc)[:10]
    jsub.job_num = WRF(js.wrf_dir, js.qsys).submit(js.task_id, js.num_nodes,
                                                   js.ppn, js.wall_time_hrs)

    send_email(
        js, 'wrf_exec',
        'Job %s - wrf job starting now with id %s.' % (js.job_id, js.task_id))
    logging.info(
        "WRF job %s submitted with id %s, waiting for rsl.error.0000" %
        (jsub.job_num, js.task_id))

    jobfile = osp.abspath(osp.join(js.workspace_path, js.job_id, 'job.json'))
    json.dump(jsub, open(jobfile, 'w'), indent=4, separators=(',', ': '))

    process_output(js.job_id)
Example #34
    def data_collection_hook(self):
        """Spawns data emulation using gphl simcal"""

        data_collect_parameters = self.current_dc_parameters

        if not HWR.beamline.gphl_workflow:
            raise ValueError("Emulator requires GPhL workflow installation")
        gphl_connection = HWR.beamline.gphl_connection
        if not gphl_connection:
            raise ValueError("Emulator requires GPhL connection installation")

        # Get program locations
        simcal_executive = gphl_connection.get_executable("simcal")
        # Get environmental variables
        envs = {
            "BDG_home":
            gphl_connection.software_paths["BDG_home"],
            "GPHL_INSTALLATION":
            gphl_connection.software_paths["GPHL_INSTALLATION"],
        }
        text_type = ConvertUtils.text_type
        for tag, val in self["environment_variables"].getProperties().items():
            envs[text_type(tag)] = text_type(val)

        # get crystal data
        sample_name = self.getProperty("default_sample_name")
        sample = HWR.beamline.sample_changer.getLoadedSample()
        if sample:
            ss0 = sample.getName()
            if ss0 and ss0.startswith(self.TEST_SAMPLE_PREFIX):
                sample_name = ss0[len(self.TEST_SAMPLE_PREFIX):]

        sample_dir = gphl_connection.software_paths.get("gphl_test_samples")
        if not sample_dir:
            raise ValueError(
                "Emulator requires gphl_test_samples dir specified")
        sample_dir = os.path.join(sample_dir, sample_name)
        if not os.path.isdir(sample_dir):
            raise ValueError("Sample data directory %s does not exist" %
                             sample_dir)
        crystal_file = os.path.join(sample_dir, "crystal.nml")
        if not os.path.isfile(crystal_file):
            raise ValueError("Emulator crystal data file %s does not exist" %
                             crystal_file)
        # in spite of the simcal_crystal_list name, this returns an OrderedDict
        crystal_data = f90nml.read(crystal_file)["simcal_crystal_list"]
        if isinstance(crystal_data, list):
            crystal_data = crystal_data[0]

        input_data = self._get_simcal_input(data_collect_parameters,
                                            crystal_data)

        # NB outfile is the echo output of the input file;
        # image files templates are set in the input file
        file_info = data_collect_parameters["fileinfo"]
        if not os.path.exists(file_info["directory"]):
            os.makedirs(file_info["directory"])
        infile = os.path.join(file_info["directory"],
                              "simcal_in_%s.nml" % self._counter)

        f90nml.write(input_data, infile, force=True)
        outfile = os.path.join(file_info["directory"],
                               "simcal_out_%s.nml" % self._counter)
        logfile = os.path.join(file_info["directory"],
                               "simcal_log_%s.txt" % self._counter)
        self._counter += 1
        hklfile = os.path.join(sample_dir, "sample.hkli")
        if not os.path.isfile(hklfile):
            raise ValueError("Emulator hkli file %s does not exist" % hklfile)
        command_list = [
            simcal_executive,
            "--input",
            infile,
            "--output",
            outfile,
            "--hkl",
            hklfile,
        ]

        for tag, val in self["simcal_options"].getProperties().items():
            command_list.extend(
                ConvertUtils.command_option(tag, val, prefix="--"))
        logging.getLogger("HWR").info("Executing command: %s", command_list)
        logging.getLogger("HWR").info("Executing environment: %s" %
                                      sorted(envs.items()))

        fp1 = open(logfile, "w")
        fp2 = subprocess.STDOUT
        # resource.setrlimit(resource.RLIMIT_STACK, (-1,-1))

        try:
            running_process = subprocess.Popen(command_list,
                                               stdout=fp1,
                                               stderr=fp2,
                                               env=envs)
            gphl_connection.collect_emulator_process = running_process
        except BaseException:
            logging.getLogger("HWR").error(
                "Error in spawning workflow application")
            raise
        finally:
            fp1.close()

        # This does waiting, so we want to collect the result afterwards
        super(CollectEmulator, self).data_collection_hook()

        logging.getLogger("HWR").info(
            "Waiting for simcal collection emulation.")
        # NBNB TODO put in time-out, somehow
        return_code = running_process.wait()
        process = gphl_connection.collect_emulator_process
        gphl_connection.collect_emulator_process = None
        if process == "ABORTED":
            logging.getLogger("HWR").info(
                "Simcal collection emulation aborted")
        elif return_code:
            raise RuntimeError(
                "simcal process terminated with return code %s" % return_code)
        else:
            logging.getLogger("HWR").info(
                "Simcal collection emulation successful")

        return
Example #35
    def setup(self):

        # TODO: Find a better place to generate this list
        self.config_files = [f for f in os.listdir(self.control_path)
                             if f.startswith('data')]
        self.config_files.append('eedata')

        # Generic model setup
        super(Mitgcm, self).setup()

        # Link restart files to work directory
        if self.prior_restart_path and not self.expt.repeat_run:

            # Determine total number of timesteps since initialisation
            core_restarts = [f for f in os.listdir(self.prior_restart_path)
                             if f.startswith('pickup.')]
            try:
                # NOTE: Use the most recent, in case of multiple restarts
                n_iter0 = max([int(f.split('.')[1]) for f in core_restarts])
            except ValueError:
                sys.exit("payu: error: no restart files found.")
        else:
            n_iter0 = 0

        # Update configuration file 'data'

        data_path = os.path.join(self.work_path, 'data')
        data_nml = f90nml.read(data_path)

        # NOTE: Assumes that these are always present
        dt = data_nml['parm03']['deltat']
        n_timesteps = data_nml['parm03']['ntimesteps']

        # NOTE: Consider permitting pchkpt_freq < dt * n_timesteps
        # NOTE: May re-enable chkpt_freq in the future
        data_nml['parm03']['niter0'] = n_iter0
        data_nml['parm03']['pchkptfreq'] = dt * n_timesteps
        data_nml['parm03']['chkptfreq'] = 0

        data_nml.write(data_path, force=True)

        # Patch or create data.mnc
        mnc_header = os.path.join(self.work_path, 'mnc_')

        data_mnc_path = os.path.join(self.work_path, 'data.mnc')
        try:
            data_mnc_nml = f90nml.read(data_mnc_path)
            data_mnc_nml['mnc_01']['mnc_outdir_str'] = mnc_header
            data_mnc_nml.write(data_mnc_path, force=True)

        except IOError as exc:
            if exc.errno == errno.ENOENT:

                mnc_01_grp = {
                    'mnc_use_outdir':   True,
                    'mnc_use_name_ni0': True,
                    'mnc_outdir_str':   mnc_header,
                    'mnc_outdir_date':  True,
                    'monitor_mnc':      True
                }
                data_mnc_nml = {'mnc_01': mnc_01_grp}

                f90nml.write(data_mnc_nml, data_mnc_path)
            else:
                raise
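The setup above uses both write forms f90nml offers: the Namelist.write method on the object returned by f90nml.read, and the module-level f90nml.write for a plain dict. A minimal sketch of both, with hypothetical paths and values:

import f90nml

# a parsed namelist exposes its own write() method
data_nml = f90nml.read('data')
data_nml['parm03']['niter0'] = 0
data_nml.write('data', force=True)

# a plain dict goes through the module-level function instead
f90nml.write({'mnc_01': {'mnc_use_outdir': True}}, 'data.mnc', force=True)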
Example #36
    def prepare_for_metgrid(self, wps_nml):
        """
        Set all domains that we use to active.

        :param wps_nml: the WPS namelist
        """
        wps_nml['share']['active_grid'] = [True] * len(self.domains)
        

if __name__ == '__main__':
    import f90nml
    import sys
    import json

    # parse a JSON domain configuration and a namelist.wps file and build the correct wps
    if len(sys.argv) != 3 and len(sys.argv) != 4:
        print('usage: %s <domains_json_file> <namelist.wps> [namelist.input]' % sys.argv[0])
        sys.exit(1)

    dcfg = WPSDomainConf(json.load(open(sys.argv[1])))
    
    wps_nml = f90nml.read(sys.argv[2])
    wrf_nml = f90nml.read(sys.argv[3]) if len(sys.argv) == 4 else None
    dcfg.prepare_for_geogrid(wps_nml, wrf_nml)

    f90nml.write(wps_nml, sys.argv[2], force=True)
    if wrf_nml is not None:
        f90nml.write(wrf_nml, sys.argv[3], force=True)


Example #37
def write_ppt_files(tasks):
    freqgroups = cmor_utils.group(tasks, get_output_freq)
    for freq1 in freqgroups:
        for freq2 in freqgroups:
            if freq2 > freq1 and freq2 % freq1 == 0:
                freqgroups[freq2] = freqgroups[freq1] + freqgroups[freq2]
    for freq in freqgroups:
        mfp2df, mfpphy, mfp3dfs, mfp3dfp, mfp3dfv = [], [], [], [], []
        alevs, plevs, hlevs = [], [], []
        for task in freqgroups[freq]:
            zaxis, levs = cmor_target.get_z_axis(task.target)
            root_codes = task.source.get_root_codes()
            if not zaxis:
                for code in root_codes:
                    if code in cmor_source.ifs_source.grib_codes_3D:
                        log.warning(
                            "3D grib code %s used in 2D cmor-target %s..."
                            "assuming this is on model levels" %
                            (str(code), task.target.variable))
                        mfp3dfs.append(code)
                    elif code in cmor_source.ifs_source.grib_codes_2D_dyn:
                        log.info(
                            "Adding grib code %s to MFP2DF %dhr ppt file for variable "
                            "%s in table %s" %
                            (str(code), freq, task.target.variable,
                             task.target.table))
                        mfp2df.append(code)
                    elif code in cmor_source.ifs_source.grib_codes_2D_phy:
                        log.info(
                            "Adding grib code %s to MFPPHY %dhr ppt file for variable "
                            "%s in table %s" %
                            (str(code), freq, task.target.variable,
                             task.target.table))
                        mfpphy.append(code)
                    else:
                        log.error("Unknown IFS grib code %s skipped" %
                                  str(code))
            else:
                for code in root_codes:
                    if code in cmor_source.ifs_source.grib_codes_3D:
                        if zaxis in cmor_target.model_axes:
                            log.info(
                                "Adding grib code %s to MFP3DFS %dhr ppt file for variable "
                                "%s in table %s" %
                                (str(code), freq, task.target.variable,
                                 task.target.table))
                            mfp3dfs.append(code)
                            alevs.extend(levs)
                        elif zaxis in cmor_target.pressure_axes:
                            log.info(
                                "Adding grib code %s to MFP3DFP %dhr ppt file for variable "
                                "%s in table %s" %
                                (str(code), freq, task.target.variable,
                                 task.target.table))
                            mfp3dfp.append(code)
                            plevs.extend(levs)
                        elif zaxis in cmor_target.height_axes:
                            log.info(
                                "Adding grib code %s to MFP3DFV %dhr ppt file for variable "
                                "%s in table %s" %
                                (str(code), freq, task.target.variable,
                                 task.target.table))
                            mfp3dfv.append(code)
                            hlevs.extend(levs)
                        else:
                            log.error(
                                "Axis type %s unknown, adding grib code %s"
                                "to model level variables" %
                                (zaxis, str(code)))
                    elif code in cmor_source.ifs_source.grib_codes_2D_dyn:
                        mfp2df.append(code)
                    elif code in cmor_source.ifs_source.grib_codes_2D_phy:
                        mfpphy.append(code)
                    else:
                        log.error("Unknown IFS grib code %s skipped" %
                                  str(code))
        # Always add the geopotential, recommended by ECMWF
        if cmor_source.grib_code(129) not in mfp3dfs:
            mfp2df.append(cmor_source.grib_code(129))
        # Always add the surface pressure, recommended by ECMWF
        mfpphy.append(cmor_source.grib_code(134))
        # Always add the logarithm of surface pressure, recommended by ECMWF
        mfp2df.append(cmor_source.grib_code(152))
        mfp2df = sorted(
            list(
                map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(),
                    set(mfp2df))))
        mfpphy = sorted(
            list(
                map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(),
                    set(mfpphy))))
        mfp3dfs = sorted(
            list(
                map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(),
                    set(mfp3dfs))))
        mfp3dfp = sorted(
            list(
                map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(),
                    set(mfp3dfp))))
        mfp3dfv = sorted(
            list(
                map(lambda c: c.var_id if c.tab_id == 128 else c.__hash__(),
                    set(mfp3dfv))))
        plevs = sorted(list(set([float(s) for s in plevs])))[::-1]
        hlevs = sorted(list(set([float(s) for s in hlevs])))
        namelist = {"CFPFMT": "MODEL"}
        if any(mfp2df):
            namelist["NFP2DF"] = len(mfp2df)
            namelist["MFP2DF"] = mfp2df
        if any(mfpphy):
            namelist["NFPPHY"] = len(mfpphy)
            namelist["MFPPHY"] = mfpphy
        if any(mfp3dfs):
            namelist["NFP3DFS"] = len(mfp3dfs)
            namelist["MFP3DFS"] = mfp3dfs
            namelist["NRFP3S"] = -1
        if any(mfp3dfp):
            namelist["NFP3DFP"] = len(mfp3dfp)
            namelist["MFP3DFP"] = mfp3dfp
            namelist["RFP3P"] = plevs
        if any(mfp3dfv):
            namelist["NFP3DFV"] = len(mfp3dfv)
            namelist["MFP3DFV"] = mfp3dfv
            namelist["RFP3V"] = hlevs
        nml = f90nml.Namelist({"NAMFPC": namelist})
        nml.uppercase, nml.end_comma = True, True
        f90nml.write(nml, "pptdddddd%04d" % (100 * freq, ))
Exemple #38
0
def _write_default():
    """create default par file"""
    if not os.path.isfile(PAR_DFLT_FILE):
        f90nml.write(PAR_DEFAULT, PAR_DFLT_FILE)
Exemple #39
0
    def data_collection_hook(self):
        """Spawns data emulation using gphl simcal"""

        data_collect_parameters = self.current_dc_parameters

        logging.getLogger('HWR').debug("Emulator: nominal position "
            + ', '.join('%s=%s' % (tt)
                        for tt in sorted(data_collect_parameters['motors'].items())
                        if tt[1] is not None)
        )

        logging.getLogger('HWR').debug("Emulator:  actual position "
             + ', '.join('%s=%s' % tt
                         for tt in sorted(self.diffractometer_hwobj.get_positions().items())
                         if tt[1] is not None)
        )

        # Done here as there are what-happens-first conflicts
        # if you put it in init
        bl_setup_hwobj = HardwareRepository().getHardwareObject(
            'beamline-setup'
        )
        if self.gphl_workflow_hwobj is None:
            self.gphl_workflow_hwobj = bl_setup_hwobj.gphl_workflow_hwobj
        if not self.gphl_workflow_hwobj:
            raise ValueError("Emulator requires GPhL workflow installation")
        if self.gphl_connection_hwobj is None:
            self.gphl_connection_hwobj = bl_setup_hwobj.gphl_connection_hwobj
        if not self.gphl_connection_hwobj:
            raise ValueError("Emulator requires GPhL connection installation")

        # Get program locations
        simcal_executive = self.gphl_connection_hwobj.get_executable('simcal')
        # Get environmental variables
        envs = {'BDG_home':
                    self.gphl_connection_hwobj.software_paths['BDG_home'],
                'GPHL_INSTALLATION':
                    self.gphl_connection_hwobj.software_paths['GPHL_INSTALLATION']
                }
        for tag, val in self['environment_variables'].getProperties().items():
            envs[str(tag)] = str(val)

        # get crystal data
        sample_name = self.getProperty('default_sample_name')
        sample = self.sample_changer_hwobj.getLoadedSample()
        if sample:
            ss = sample.getName()
            if ss and ss.startswith(self.TEST_SAMPLE_PREFIX):
                sample_name = ss[len(self.TEST_SAMPLE_PREFIX):]

        sample_dir = self.gphl_connection_hwobj.software_paths.get(
            'gphl_test_samples'
        )
        if not sample_dir:
            raise ValueError(
                "Emulator requires gphl_test_samples dir specified"
            )
        sample_dir = os.path.join(sample_dir, sample_name)
        if not os.path.isdir(sample_dir):
            raise ValueError(
                "Sample data directory %s does not exist" % sample_dir
            )
        crystal_file = os.path.join(sample_dir, 'crystal.nml')
        if not os.path.isfile(crystal_file):
            raise ValueError(
                "Emulator crystal data file %s does not exist" % crystal_file
            )
        # in spite of the simcal_crystal_list name this returns an OrderedDict
        crystal_data = f90nml.read(crystal_file)['simcal_crystal_list']

        input_data = self._get_simcal_input(data_collect_parameters,
                                            crystal_data)

        # NB outfile is the echo output of the input file;
        # image files templates are set in the input file
        file_info = data_collect_parameters['fileinfo']
        if not os.path.exists(file_info['directory']):
            os.makedirs(file_info['directory'])
        if not os.path.exists(file_info['directory']):
            os.makedirs(file_info['directory'])
        infile = os.path.join(file_info['directory'],
                              'simcal_in_%s.nml' % self._counter)

        f90nml.write(input_data, infile, force=True)
        outfile = os.path.join(file_info['directory'],
                               'simcal_out_%s.nml' % self._counter)
        logfile = os.path.join(file_info['directory'],
                               'simcal_log_%s.txt' % self._counter)
        self._counter += 1
        hklfile = os.path.join(sample_dir, 'sample.hkli')
        if not os.path.isfile(hklfile):
            raise ValueError(
                "Emulator hkli file %s does not exist" % hklfile
            )
        command_list = [simcal_executive, '--input', infile, '--output', outfile,
                        '--hkl', hklfile]

        for tag, val in self['simcal_options'].getProperties().items():
            command_list.extend(ConvertUtils.command_option(tag, val,
                                                            prefix='--'))
        logging.getLogger('HWR').info("Executing command: %s" % command_list)
        logging.getLogger('HWR').info("Executing environment: %s"
                                      % sorted(envs.items()))


        fp1 = open(logfile, 'w')
        fp2 = subprocess.STDOUT
        # resource.setrlimit(resource.RLIMIT_STACK, (-1,-1))

        try:
            running_process = subprocess.Popen(command_list, stdout=fp1,
                                               stderr=fp2, env=envs)
        except:
            logging.getLogger('HWR').error('Error in spawning workflow application')
            raise
        finally:
            fp1.close()

        # This does waiting, so we want to collect the result afterwards
        super(CollectEmulator, self).data_collection_hook()

        logging.getLogger('HWR').info(
            'Waiting for simcal collection emulation.'
        )
        # NBNB TODO put in time-out, somehow
        if running_process is not None:
            return_code = running_process.wait()
            if return_code:
                raise RuntimeError("simcal process terminated with return code %s"
                                   % return_code)
            else:
                logging.getLogger('HWR').info(
                    'Simcal collection emulation successful'
                )

        return
Exemple #40
0
    def setup(self):

        cpl_keys = {'cice': ('input_ice.nml', 'coupling_nml', 'runtime0'),
                    'matm': ('input_atm.nml', 'coupling', 'truntime0')}

        # Keep track of this in order to set the oasis runtime.
        run_runtime = 0

        for model in self.expt.models:

            if model.model_type == 'cice':

                # Stage the supplemental input files
                if model.prior_restart_path:
                    for f_name in model.access_restarts:
                        f_src = os.path.join(model.prior_restart_path, f_name)
                        f_dst = os.path.join(model.work_input_path, f_name)

                        if os.path.isfile(f_src):
                            make_symlink(f_src, f_dst)

            if model.model_type in ('cice', 'matm'):

                # Update the supplemental OASIS namelists
                cpl_fname, cpl_group, runtime0_key = cpl_keys[model.model_type]

                cpl_fpath = os.path.join(model.work_path, cpl_fname)
                cpl_nml = f90nml.read(cpl_fpath)

                # Which calendar are we using, noleap or Gregorian.
                caltype = cpl_nml[cpl_group]['caltype']
                init_date = cal.int_to_date(cpl_nml[cpl_group]['init_date'])

                # Get time info about the beginning of this run. We're
                # interested in:
                #   1. start date of run
                #   2. total runtime of all previous runs.
                if model.prior_output_path and not self.expt.repeat_run:

                    prior_cpl_fpath = os.path.join(model.prior_output_path,
                                                   cpl_fname)
                    prior_cpl_nml = f90nml.read(prior_cpl_fpath)
                    cpl_nml_grp = prior_cpl_nml[cpl_group]

                    # The total time in seconds since the beginning of
                    # the experiment.
                    total_runtime = int(cpl_nml_grp[runtime0_key] +
                                        cpl_nml_grp['runtime'])
                    run_start_date = cal.date_plus_seconds(init_date,
                                                           total_runtime,
                                                           caltype)

                else:
                    total_runtime = 0
                    run_start_date = init_date

                # Get new runtime for this run. We get this from either the
                # 'runtime' part of the payu config, or from the namelist
                if self.expt.runtime:
                    run_runtime = cal.runtime_from_date(
                        run_start_date,
                        self.expt.runtime['years'],
                        self.expt.runtime['months'],
                        self.expt.runtime['days'],
                        self.expt.runtime.get('seconds', 0),
                        caltype)
                else:
                    run_runtime = cpl_nml[cpl_group]['runtime']

                # Now write out new run start date and total runtime.
                cpl_nml[cpl_group]['inidate'] = cal.date_to_int(run_start_date)
                cpl_nml[cpl_group][runtime0_key] = total_runtime
                cpl_nml[cpl_group]['runtime'] = int(run_runtime)

                if model.model_type == 'cice':
                    if self.expt.counter and not self.expt.repeat_run:
                        cpl_nml[cpl_group]['jobnum'] = 1 + self.expt.counter
                    else:
                        cpl_nml[cpl_group]['jobnum'] = 1

                nml_work_path = os.path.join(model.work_path, cpl_fname)
                f90nml.write(cpl_nml, nml_work_path + '~')
                shutil.move(nml_work_path + '~', nml_work_path)

        # Now change the oasis runtime. This needs to be done after the others.
        for model in self.expt.models:
            if model.model_type == 'oasis':
                namcouple = os.path.join(model.work_path, 'namcouple')

                s = ''
                with open(namcouple, 'r+') as f:
                    s = f.read()
                    m = re.search(r"^[ \t]*\$RUNTIME.*?^[ \t]*(\d+)", s,
                                  re.MULTILINE | re.DOTALL)
                    assert(m is not None)
                    s = s[:m.start(1)] + str(run_runtime) + s[m.end(1):]

                with open(namcouple, 'w') as f:
                    f.write(s)
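
The write-then-move idiom above keeps a half-written namelist from ever replacing a good one. A small stand-alone sketch of that pattern follows; the helper name write_nml_atomic is our own and is not part of payu.

import shutil
import f90nml

def write_nml_atomic(nml, path):
    # write next to the destination, then move into place so readers
    # never see a partially written namelist
    tmp_path = path + '~'
    # force=True so a stale temporary file does not abort the write
    f90nml.write(nml, tmp_path, force=True)
    shutil.move(tmp_path, path)
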
Exemple #41
0
    def data_collection_hook(self):
        """Spawns data emulation using gphl simcal"""

        data_collect_parameters = self.current_dc_parameters

        if not api.gphl_workflow:
            raise ValueError("Emulator requires GPhL workflow installation")
        gphl_connection = api.gphl_connection
        if not gphl_connection:
            raise ValueError("Emulator requires GPhL connection installation")

        # Get program locations
        simcal_executive = gphl_connection.get_executable("simcal")
        # Get environmental variables
        envs = {
            "BDG_home": gphl_connection.software_paths["BDG_home"],
            "GPHL_INSTALLATION": gphl_connection.software_paths[
                "GPHL_INSTALLATION"
            ],
        }
        for tag, val in self["environment_variables"].getProperties().items():
            envs[str(tag)] = str(val)

        # get crystal data
        sample_name = self.getProperty("default_sample_name")
        sample = self.sample_changer_hwobj.getLoadedSample()
        if sample:
            ss0 = sample.getName()
            if ss0 and ss0.startswith(self.TEST_SAMPLE_PREFIX):
                sample_name = ss0[len(self.TEST_SAMPLE_PREFIX) :]

        sample_dir = gphl_connection.software_paths.get("gphl_test_samples")
        if not sample_dir:
            raise ValueError("Emulator requires gphl_test_samples dir specified")
        sample_dir = os.path.join(sample_dir, sample_name)
        if not os.path.isdir(sample_dir):
            raise ValueError("Sample data directory %s does not exist" % sample_dir)
        crystal_file = os.path.join(sample_dir, "crystal.nml")
        if not os.path.isfile(crystal_file):
            raise ValueError(
                "Emulator crystal data file %s does not exist" % crystal_file
            )
        # in spite of the simcal_crystal_list name this returns an OrderedDict
        crystal_data = f90nml.read(crystal_file)["simcal_crystal_list"]
        if isinstance(crystal_data, list):
            crystal_data = crystal_data[0]

        input_data = self._get_simcal_input(data_collect_parameters, crystal_data)

        # NB outfile is the echo output of the input file;
        # image files templates are set in the input file
        file_info = data_collect_parameters["fileinfo"]
        if not os.path.exists(file_info["directory"]):
            os.makedirs(file_info["directory"])
        if not os.path.exists(file_info["directory"]):
            os.makedirs(file_info["directory"])
        infile = os.path.join(
            file_info["directory"], "simcal_in_%s.nml" % self._counter
        )

        f90nml.write(input_data, infile, force=True)
        outfile = os.path.join(
            file_info["directory"], "simcal_out_%s.nml" % self._counter
        )
        logfile = os.path.join(
            file_info["directory"], "simcal_log_%s.txt" % self._counter
        )
        self._counter += 1
        hklfile = os.path.join(sample_dir, "sample.hkli")
        if not os.path.isfile(hklfile):
            raise ValueError("Emulator hkli file %s does not exist" % hklfile)
        command_list = [
            simcal_executive,
            "--input",
            infile,
            "--output",
            outfile,
            "--hkl",
            hklfile,
        ]

        for tag, val in self["simcal_options"].getProperties().items():
            command_list.extend(ConvertUtils.command_option(tag, val, prefix="--"))
        logging.getLogger("HWR").info("Executing command: %s", command_list)
        logging.getLogger("HWR").info(
            "Executing environment: %s" % sorted(envs.items())
        )

        fp1 = open(logfile, "w")
        fp2 = subprocess.STDOUT
        # resource.setrlimit(resource.RLIMIT_STACK, (-1,-1))

        try:
            running_process = subprocess.Popen(
                command_list, stdout=fp1, stderr=fp2, env=envs
            )
            gphl_connection.collect_emulator_process = running_process
        except BaseException:
            logging.getLogger("HWR").error("Error in spawning workflow application")
            raise
        finally:
            fp1.close()

        # This does waiting, so we want to collect the result afterwards
        super(CollectEmulator, self).data_collection_hook()

        logging.getLogger("HWR").info("Waiting for simcal collection emulation.")
        # NBNB TODO put in time-out, somehow
        return_code = running_process.wait()
        process = gphl_connection.collect_emulator_process
        gphl_connection.collect_emulator_process = None
        if process == 'ABORTED':
            logging.getLogger("HWR").info("Simcal collection emulation aborted")
        elif return_code:
            raise RuntimeError(
                "simcal process terminated with return code %s" % return_code
            )
        else:
            logging.getLogger("HWR").info("Simcal collection emulation successful")

        return
Exemple #42
0
    def setup(self):

        # TODO: Find a better place to generate this list
        self.config_files = [f for f in os.listdir(self.control_path)
                             if f.startswith('data')]
        self.config_files.append('eedata')

        # Generic model setup
        super(Mitgcm, self).setup()

        # Link restart files to work directory
        if self.prior_restart_path and not self.expt.repeat_run:

            # Determine total number of timesteps since initialisation
            core_restarts = [f for f in os.listdir(self.prior_restart_path)
                             if f.startswith('pickup.')]
            try:
                # NOTE: Use the most recent, in case of multiple restarts
                n_iter0 = max([int(f.split('.')[1]) for f in core_restarts])
            except ValueError:
                sys.exit("payu: error: no restart files found.")
        else:
            n_iter0 = 0

        # Update configuration file 'data'

        data_path = os.path.join(self.work_path, 'data')

        # MITgcm strips shell-style (#) comments from its namelists
        nml_parser = f90nml.Parser()
        nml_parser.comment_tokens += '#'

        data_nml = nml_parser.read(data_path)

        # Timesteps are either global (deltat) or divided into momentum
        # (deltatmom) and tracer (deltat).  If deltat is missing, then we just
        # try deltatmom.  But I am not sure how to best handle this case.

        # TODO: Sort this out with an MITgcm user
        try:
            dt = data_nml['parm03']['deltat']
        except KeyError:
            dt = data_nml['parm03']['deltatmom']

        # Runtime seems to be set either by timesteps (ntimesteps) or physical
        # time (startTime and endTime).

        # TODO: Sort this out with an MITgcm user
        try:
            n_timesteps = data_nml['parm03']['ntimesteps']
            pchkpt_freq = dt * n_timesteps
        except KeyError:
            t_start = data_nml['parm03']['starttime']
            t_end = data_nml['parm03']['endtime']
            pchkpt_freq = t_end - t_start

        # NOTE: Consider permitting pchkpt_freq < dt * n_timesteps
        # NOTE: May re-enable chkpt_freq in the future
        data_nml['parm03']['niter0'] = n_iter0
        data_nml['parm03']['pchkptfreq'] = pchkpt_freq
        data_nml['parm03']['chkptfreq'] = 0

        data_nml.write(data_path, force=True)

        # Patch or create data.mnc
        mnc_header = os.path.join(self.work_path, 'mnc_')

        data_mnc_path = os.path.join(self.work_path, 'data.mnc')
        try:
            data_mnc_nml = f90nml.read(data_mnc_path)
            data_mnc_nml['mnc_01']['mnc_outdir_str'] = mnc_header
            data_mnc_nml.write(data_mnc_path, force=True)

        except IOError as exc:
            if exc.errno == errno.ENOENT:

                mnc_01_grp = {
                    'mnc_use_outdir':   True,
                    'mnc_use_name_ni0': True,
                    'mnc_outdir_str':   mnc_header,
                    'mnc_outdir_date':  True,
                    'monitor_mnc':      True
                }
                data_mnc_nml = {'mnc_01': mnc_01_grp}

                f90nml.write(data_mnc_nml, data_mnc_path)
            else:
                raise
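
The '#'-comment handling above is worth isolating. This sketch, assuming a MITgcm-style 'data' file in the current directory with a parm03 group, shows how extending Parser.comment_tokens lets f90nml read namelists that carry shell-style comments.

import f90nml

parser = f90nml.Parser()
parser.comment_tokens += '#'          # accept MITgcm's shell-style comments
data_nml = parser.read('data')        # the 'data' path is an assumption
data_nml['parm03']['niter0'] = 0
data_nml.write('data', force=True)    # write the patched namelist back in place
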
Exemple #43
0
def execute(args):
    """
    Executes a weather/fire simulation.

    The args dictionary contains the simulation configuration.

    :param args: a dictionary with the following keys
    :param grid_code: the (unique) code of the grid that is used
    :param sys_install_path: system installation directory
    :param start_utc: start time of simulation in UTC
    :param end_utc: end time of simulation in UTC
    :param workspace_path: workspace directory
    :param wps_install_path: installation directory of WPS that will be used
    :param wrf_install_path: installation directory of WRF that will be used
    :param grib_source: a string identifying a valid GRIB2 source
    :param wps_namelist_path: the path to the namelist.wps file that will be used as template
    :param wrf_namelist_path: the path to the namelist.input file that will be used as template
    :param fire_namelist_path: the path to the namelist.fire file that will be used as template
    :param wps_geog_path: the path to the geogrid data directory providing terrain/fuel data
    :param email_notification: dictionary containing keys address and events indicating when a mail should be fired off
    """
    logging.basicConfig(level=logging.INFO)

    # initialize the job state from the arguments
    js = JobState(args)

    logging.info("job %s starting [%d hours to forecast]." % (js.job_id, js.fc_hrs))
    send_email(js, 'start', 'Job %s started.' % js.job_id)

    # read in all namelists
    js.wps_nml = f90nml.read(args['wps_namelist_path'])
    js.wrf_nml = f90nml.read(args['wrf_namelist_path'])
    js.fire_nml = f90nml.read(args['fire_namelist_path'])
    js.ems_nml = None
    if 'emissions_namelist_path' in args:
        js.ems_nml = f90nml.read(args['emissions_namelist_path'])
    
    # Parse and setup the domain configuration
    js.domain_conf = WPSDomainConf(js.domains)

    num_doms = len(js.domain_conf)
    js.wps_nml['share']['start_date'] = [utc_to_esmf(js.start_utc)] * num_doms
    js.wps_nml['share']['end_date'] = [utc_to_esmf(js.end_utc)] * num_doms
    js.wps_nml['share']['interval_seconds'] = 3600

    logging.info("number of domains defined is %d." % num_doms)

    # build directories in workspace
    js.wps_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wps'))
    js.wrf_dir = osp.abspath(osp.join(js.workspace_path, js.job_id, 'wrf'))

    logging.info("cloning WPS into %s" % js.wps_dir)

    # step 1: clone WPS and WRF directories
    cln = WRFCloner(args)
    cln.clone_wps(js.wps_dir, js.grib_source.vtables(), [])

    # step 2: process domain information and patch namelist for geogrid
    js.wps_nml['geogrid']['geog_data_path'] = args['wps_geog_path']
    js.domain_conf.prepare_for_geogrid(js.wps_nml, js.wrf_nml, js.wrfxpy_dir, js.wps_dir)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    # do steps 2 & 3 & 4 in parallel (two execution streams)
    #  -> GEOGRID ->
    #  -> GRIB2 download ->  UNGRIB ->

    proc_q = Queue()
    geogrid_proc = Process(target=run_geogrid, args=(js, proc_q))
    grib_proc = Process(target=retrieve_gribs_and_run_ungrib, args=(js, proc_q))

    geogrid_proc.start()
    grib_proc.start()

    # wait until both tasks are done
    geogrid_proc.join()
    grib_proc.join()

    if proc_q.get() != 'SUCCESS':
        return

    if proc_q.get() != 'SUCCESS':
        return

    proc_q.close()

    # step 5: execute metgrid after ensuring all grids will be processed
    js.domain_conf.prepare_for_metgrid(js.wps_nml)
    f90nml.write(js.wps_nml, osp.join(js.wps_dir, 'namelist.wps'), force=True)

    logging.info("running METGRID")
    Metgrid(js.wps_dir).execute().check_output()

    send_email(js, 'metgrid', 'Job %s - metgrid complete.' % js.job_id)
    logging.info("cloning WRF into %s" % js.wrf_dir)

    # step 6: clone wrf directory, symlink all met_em* files
    cln.clone_wrf(js.wrf_dir, [])
    symlink_matching_files(js.wrf_dir, js.wps_dir, "met_em*")

    logging.info("running REAL")

    # step 7: patch input namelist, fire namelist, emissions namelist (if required)
    #         and execute real.exe
    time_ctrl = update_time_control(js.start_utc, js.end_utc, num_doms)
    js.wrf_nml['time_control'].update(time_ctrl)
    update_namelist(js.wrf_nml, js.grib_source.namelist_keys())
    if 'ignitions' in args:
        update_namelist(js.wrf_nml, render_ignitions(js, num_doms))

    # if we have an emissions namelist, automatically turn on the tracers
    if js.ems_nml is not None:
        f90nml.write(js.ems_nml, osp.join(js.wrf_dir, 'namelist.fire_emissions'), force=True)
        js.wrf_nml['dynamics']['tracer_opt'] = [2] * num_doms

    f90nml.write(js.wrf_nml, osp.join(js.wrf_dir, 'namelist.input'), force=True)

    f90nml.write(js.fire_nml, osp.join(js.wrf_dir, 'namelist.fire'), force=True)

    # try to run Real twice as it sometimes fails the first time
    # it's not clear why this error happens 
    try:
        Real(js.wrf_dir).execute().check_output()
    except Exception as e:
        logging.error('Real step failed with exception %s, retrying ...' % str(e))
        Real(js.wrf_dir).execute().check_output()
    

    # step 8: if requested, do fuel moisture DA
    if js.fmda is not None:
        logging.info('running fuel moisture data assimilation')
        for dom in js.fmda.domains:
            assimilate_fm10_observations(osp.join(js.wrf_dir, 'wrfinput_d%02d' % dom), None, js.fmda.token)

    logging.info('submitting WRF job')
    send_email(js, 'wrf_submit', 'Job %s - wrf job submitted.' % js.job_id)

    # step 8: execute wrf.exe on parallel backend
    js.task_id = "sim-" + js.grid_code + "-" + utc_to_esmf(js.start_utc)[:10]
    WRF(js.wrf_dir, js.qsys).submit(js.task_id, js.num_nodes, js.ppn, js.wall_time_hrs)

    send_email(js, 'wrf_exec', 'Job %s - wrf job starting now with id %s.' % (js.job_id, js.task_id))
    logging.info("WRF job submitted with id %s, waiting for rsl.error.0000" % js.task_id)

    # step 9: wait for appearance of rsl.error.0000 and open it
    wrf_out = None
    while wrf_out is None:
        try:
            wrf_out = open(osp.join(js.wrf_dir, 'rsl.error.0000'))
            break
        except IOError:
            logging.info('forecast: waiting 5 seconds for rsl.error.0000 file')
        
        time.sleep(5)
    
    logging.info('Detected rsl.error.0000')

    # step 10: track log output and check for history writes from WRF
    pp = None
    already_sent_files, max_pp_dom = [], -1
    if js.postproc is not None:
        js.pp_dir = osp.join(js.workspace_path, js.job_id, "products")
        make_dir(js.pp_dir)
        pp = Postprocessor(js.pp_dir, 'wfc-' + js.grid_code)
        max_pp_dom = max([int(x) for x in filter(lambda x: len(x) == 1, js.postproc)])

    while True:
        line = wrf_out.readline().strip()
        if not line:
            time.sleep(0.2)
            continue

        if "SUCCESS COMPLETE WRF" in line:
            send_email(js, 'complete', 'Job %s - wrf job complete SUCCESS.' % js.job_id)
            logging.info("WRF completion detected.")
            break

        if "Timing for Writing wrfout" in line:
            esmf_time, domain_str = re.match(r'.*wrfout_d.._([0-9_\-:]{19}) for domain\ +(\d+):', line).groups()
            dom_id = int(domain_str)
            logging.info("Detected history write for domain %d for time %s." % (dom_id, esmf_time))
            if js.postproc is not None and str(dom_id) in js.postproc:
                var_list = [str(x) for x in js.postproc[str(dom_id)]]
                logging.info("Executing postproc instructions for vars %s for domain %d." % (str(var_list), dom_id))
                wrfout_path = find_fresh_wrfout(js.wrf_dir, dom_id)
                try:
                    pp.process_vars(wrfout_path, dom_id, esmf_time, var_list)
                except Exception as e:
                    logging.warning('Failed to postprocess for time %s with error %s.' % (esmf_time, str(e)))

                # if this is the last processed domain for this timestamp in incremental mode, upload to server
                if dom_id == max_pp_dom and js.postproc.get('shuttle', None) == 'incremental':
                    desc = js.postproc['description'] if 'description' in js.postproc else js.job_id
                    sent_files_1 = send_product_to_server(args, js.pp_dir, js.job_id, js.job_id, desc, already_sent_files)
                    logging.info('sent %d files to visualization server.' % len(sent_files_1))
                    already_sent_files = filter(lambda x: not x.endswith('json'), already_sent_files + sent_files_1)

    # if we are to send out the postprocessed files after completion, this is the time
    if js.postproc.get('shuttle', None) == 'on_completion':
        desc = js.postproc['description'] if 'description' in js.postproc else js.job_id
        send_product_to_server(args, js.pp_dir, js.job_id, js.job_id, desc)
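
For orientation, an illustrative args dictionary covering the keys listed in the docstring might look as follows; every path, grid code, GRIB source and time below is a placeholder chosen for the sketch, not a wrfxpy default.

from datetime import datetime

args = {
    'grid_code': 'demo-grid',
    'sys_install_path': '/opt/wrfxpy',
    'start_utc': datetime(2016, 6, 1, 0, 0),
    'end_utc': datetime(2016, 6, 2, 0, 0),
    'workspace_path': '/scratch/wksp',
    'wps_install_path': '/opt/WPS',
    'wrf_install_path': '/opt/WRFV3',
    'grib_source': 'NAM218',
    'wps_namelist_path': 'etc/nlists/default.wps',
    'wrf_namelist_path': 'etc/nlists/default.input',
    'fire_namelist_path': 'etc/nlists/default.fire',
    'wps_geog_path': '/data/WPS-GEOG',
    'email_notification': {'address': 'user@example.com',
                           'events': ['start', 'complete']},
}
# execute(args)  # would launch the full WPS/WRF pipeline described above
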
Exemple #44
0
def parse():
    parser = argparse.ArgumentParser()

    parser.add_argument('--version', action='version',
                        version='f90nml {}'.format(f90nml.__version__))

    parser.add_argument('--group', '-g', action='store')
    parser.add_argument('--set', '-s', action='append')
    parser.add_argument('--patch', '-p', action='store_false')

    parser.add_argument('input')
    parser.add_argument('output', nargs='?')

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit()

    args = parser.parse_args()

    input_fname = args.input
    output_fname = args.output

    # Input config

    if input_fname:
        input_root, input_ext = os.path.splitext(input_fname)
        if input_ext == '.json':
            with open(input_fname) as input_file:
                input_data = json.load(input_file)

        elif has_yaml and input_ext == '.yaml':
            with open(input_fname) as input_file:
                input_data = yaml.load(input_file)

        else:
            # Assume unrecognised extensions are namelists
            input_data = f90nml.read(input_fname)
    else:
        input_data = {}

    # Replace any values

    if args.set:
        if not args.group:
            # Use the first available group
            grp = list(input_data.keys())[0]
            print('f90nml: warning: Assuming variables are in group \'{}\'.'
                  ''.format(grp))
        else:
            grp = args.group

        update_nml = '&{} {} /\n'.format(grp, ', '.join(args.set))
        with io.StringIO(update_nml) as update_io:
            update_data = f90nml.read(update_io)

        input_data[grp].update(update_data[grp])

    # Target output

    if output_fname:
        output_root, output_ext = os.path.splitext(output_fname)

        if output_ext == '.json':
            with open(output_fname, 'w') as output_file:
                json.dump(input_data, output_file,
                          indent=4, separators=(',', ': '))
                output_file.write('\n')

        elif has_yaml and output_ext == '.yaml':
            with open(output_fname, 'w') as output_file:
                yaml.dump(input_data, output_file)

        else:
            # Default to namelist output
            f90nml.write(input_data, output_fname)
    else:
        # TODO: Combine with extension output
        f90nml.write(input_data, sys.stdout)
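
The '--set' handling above builds a one-line namelist string and merges it into the chosen group. Below is a self-contained sketch of that mechanism with made-up group and variable names.

import io
import f90nml

nml = f90nml.Namelist({'config': {'steps': 10, 'dt': 0.5}})
# turn command-line style assignments into a one-group namelist string
update_nml = '&{} {} /\n'.format('config', ', '.join(['steps=20', 'verbose=.true.']))
with io.StringIO(update_nml) as update_io:
    update_data = f90nml.read(update_io)
nml['config'].update(update_data['config'])
print(nml)   # the merged group now has steps=20, dt=0.5 and verbose=.true.
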
Exemple #45
0
def write_input_dic(
        run_type_dir, new_input_dic
):
    f90nml.write(new_input_dic,
                 os.path.join(run_type_dir, NAMELIST_INPUT),
                 force=True)
Exemple #46
0
    def setup(self):
        if not self.top_level_model:
            return

        cpl_keys = {'cice': ('input_ice.nml', 'coupling', 'runtime0'),
                    'matm': ('input_atm.nml', 'coupling', 'truntime0')}

        # Keep track of this in order to set the oasis runtime.
        run_runtime = 0

        for model in self.expt.models:

            if model.model_type == 'cice' or model.model_type == 'cice5':

                # Horrible hack to make a link to o2i.nc in the
                # work/ice/RESTART directory
                f_name = 'o2i.nc'
                f_src = os.path.join(model.work_path, f_name)
                f_dst = os.path.join(model.work_restart_path, f_name)

                if os.path.isfile(f_src):
                    make_symlink(f_src, f_dst)

            if model.model_type == 'cice5':

                # Stage the supplemental input files
                if model.prior_restart_path:
                    for f_name in model.access_restarts:
                        f_src = os.path.join(model.prior_restart_path, f_name)
                        f_dst = os.path.join(model.work_input_path, f_name)

                        if os.path.isfile(f_src):
                            make_symlink(f_src, f_dst)

            if model.model_type in ('cice', 'matm'):

                # Update the supplemental OASIS namelists
                cpl_fname, cpl_group, runtime0_key = cpl_keys[model.model_type]

                cpl_fpath = os.path.join(model.work_path, cpl_fname)
                cpl_nml = f90nml.read(cpl_fpath)

                # Which calendar are we using, noleap or Gregorian.
                caltype = cpl_nml[cpl_group]['caltype']
                init_date = cal.int_to_date(cpl_nml[cpl_group]['init_date'])

                # Get time info about the beginning of this run. We're
                # interested in:
                #   1. start date of run
                #   2. total runtime of all previous runs.
                if model.prior_restart_path and not self.expt.repeat_run:

                    prior_cpl_fpath = os.path.join(model.prior_restart_path,
                                                   cpl_fname)

                    # With later versions this file exists in the prior restart
                    # path, but this was not always the case, so check, and if
                    # not there use prior output path
                    if not os.path.exists(prior_cpl_fpath):
                        print('payu: warning: {0} missing from prior restart '
                              'path; checking prior output.'.format(cpl_fname),
                              file=sys.stderr)
                        if not os.path.isdir(model.prior_output_path):
                            print('payu: error: No prior output path; '
                                  'aborting run.')
                            sys.exit(errno.ENOENT)

                        prior_cpl_fpath = os.path.join(model.prior_output_path,
                                                       cpl_fname)

                    try:
                        prior_cpl_nml = f90nml.read(prior_cpl_fpath)
                    except IOError as exc:
                        if exc.errno == errno.ENOENT:
                            print('payu: error: {0} does not exist; aborting.'
                                  ''.format(prior_cpl_fpath), file=sys.stderr)
                            sys.exit(exc.errno)
                        else:
                            raise

                    cpl_nml_grp = prior_cpl_nml[cpl_group]

                    # The total time in seconds since the beginning of
                    # the experiment.
                    total_runtime = int(cpl_nml_grp[runtime0_key] +
                                        cpl_nml_grp['runtime'])
                    run_start_date = cal.date_plus_seconds(init_date,
                                                           total_runtime,
                                                           caltype)

                else:
                    total_runtime = 0
                    run_start_date = init_date

                # Get new runtime for this run. We get this from either the
                # 'runtime' part of the payu config, or from the namelist
                if self.expt.runtime:
                    run_runtime = cal.runtime_from_date(
                        run_start_date,
                        self.expt.runtime['years'],
                        self.expt.runtime['months'],
                        self.expt.runtime['days'],
                        self.expt.runtime.get('seconds', 0),
                        caltype)
                else:
                    run_runtime = cpl_nml[cpl_group]['runtime']

                # Now write out new run start date and total runtime.
                cpl_nml[cpl_group]['inidate'] = cal.date_to_int(run_start_date)
                cpl_nml[cpl_group][runtime0_key] = total_runtime
                cpl_nml[cpl_group]['runtime'] = int(run_runtime)

                if model.model_type == 'cice':
                    if self.expt.counter and not self.expt.repeat_run:
                        cpl_nml[cpl_group]['jobnum'] = 1 + self.expt.counter
                    else:
                        cpl_nml[cpl_group]['jobnum'] = 1

                nml_work_path = os.path.join(model.work_path, cpl_fname)
                f90nml.write(cpl_nml, nml_work_path + '~')
                shutil.move(nml_work_path + '~', nml_work_path)

        # Now change the oasis runtime. This needs to be done after the others.
        for model in self.expt.models:
            if model.model_type == 'oasis':
                namcouple = os.path.join(model.work_path, 'namcouple')

                s = ''
                with open(namcouple, 'r+') as f:
                    s = f.read()
                    m = re.search(r"^[ \t]*\$RUNTIME.*?^[ \t]*(\d+)", s,
                                  re.MULTILINE | re.DOTALL)
                    assert(m is not None)
                    s = s[:m.start(1)] + str(run_runtime) + s[m.end(1):]

                with open(namcouple, 'w') as f:
                    f.write(s)