def backup_scripts():
    """Archive the scripts and configuration used for this experiment run.

    Copies the current scripts into ``<archivedir>/DART-WRF/`` for
    reproducibility. Files already present there are moved into an
    ``old/`` subdirectory first. Symlinks and directories are skipped.
    """
    current = cluster.scriptsdir
    main_a = os.path.join(cluster.archivedir(), 'DART-WRF')
    old_a = os.path.join(main_a, 'old')
    os.makedirs(cluster.archivedir(), exist_ok=True)
    os.makedirs(main_a, exist_ok=True)
    os.makedirs(old_a, exist_ok=True)

    def func(src, dst, method):
        # apply `method` only to regular files (skip symlinks and directories)
        if os.path.islink(src) or os.path.isdir(src):
            pass
        else:
            method(src, dst)

    # move any previously archived scripts out of the way
    for f in os.listdir(main_a):
        func(os.path.join(main_a, f), os.path.join(old_a, f), shutil.move)

    # reproducibility: copy scheduler and configuration files of this run
    for f in ['scheduler.py', 'config/clusters.py', 'config/cfg.py']:
        fname = os.path.basename(f)
        func(os.path.join(current, '..', f), os.path.join(main_a, fname),
             shutil.copy)
    for f in os.listdir(current):
        func(os.path.join(current, f), main_a + '/', shutil.copy)
def run(iens, begin, end, hist_interval=5, radt=5, archive=True):
    """Prepare the WRF namelist for one ensemble member.

    Fills the namelist template placeholders (grid spacing, output
    interval, radiation timestep, archive path, start/end times) and,
    if archiving, copies wrfinput and the namelist to the archive.

    Args:
        iens (int): ensemble member index
        begin (dt.datetime): forecast start time
        end (dt.datetime): forecast end time
        hist_interval (int): history output interval (namelist units)
        radt (int): radiation timestep (namelist units)
        archive (bool): if True, write to archivedir of experiment
            if False, write to WRF run directory
    """
    rundir = cluster.wrf_rundir(iens)
    print(rundir)
    copy(cluster.namelist, rundir + '/namelist.input')

    sed_inplace(rundir + '/namelist.input', '<dx>', str(int(exp.model_dx)))
    #sed_inplace(rundir+'/namelist.input', '<timestep>', str(int(exp.timestep)))
    sed_inplace(rundir + '/namelist.input', '<hist_interval>',
                str(int(hist_interval)))
    sed_inplace(rundir + '/namelist.input', '<radt>', str(int(radt)))

    if archive:
        # <archivedir>/<YYYY-mm-dd_HH:MM>/<iens>/ (iens kept out of the
        # strftime format string so it cannot be misread as a directive)
        archdir = (cluster.archivedir() + begin.strftime('/%Y-%m-%d_%H:%M/')
                   + str(iens) + '/')
        os.makedirs(archdir, exist_ok=True)
    else:
        archdir = './'
    print('namelist for run from', begin, end, 'output to', archdir)
    sed_inplace(rundir + '/namelist.input', '<archivedir>', archdir)

    # set start and end times in the namelist
    for k, v in {'<y1>': '%Y', '<m1>': '%m', '<d1>': '%d',
                 '<HH1>': '%H', '<MM1>': '%M'}.items():
        sed_inplace(rundir + '/namelist.input', k, begin.strftime(v))
    for k, v in {'<y2>': '%Y', '<m2>': '%m', '<d2>': '%d',
                 '<HH2>': '%H', '<MM2>': '%M'}.items():
        sed_inplace(rundir + '/namelist.input', k, end.strftime(v))

    #########################
    if archive:
        # archdir is the same directory the original computed separately
        # as `init_dir`; reuse it instead of recomputing and re-creating.
        try:
            print('copy wrfinput of this run to archive')
            copy(rundir + '/wrfinput_d01', archdir + '/wrfinput_d01')
            print('copy namelist to archive')
            copy(rundir + '/namelist.input', archdir + '/namelist.input')
        except Exception as e:
            # best-effort archiving: warn instead of aborting the run setup
            warnings.warn(str(e))
def create_satimages(init_time, depends_on=None):
    """Submit a SLURM job that computes synthetic satellite images (RTTOV)
    for the forecast initialized at `init_time`."""
    job = my_Slurm("pRTTOV", cfg_update={"ntasks": "48", "time": "30"})
    cmd = (cluster.python
           + ' /home/fs71386/lkugler/RTTOV-WRF/run_init.py '
           + cluster.archivedir()
           + init_time.strftime('/%Y-%m-%d_%H:%M/'))
    job.run(cmd, depends_on=[depends_on])
def assimilate(assim_time, prior_init_time, prior_path_exp=False,
               depends_on=None):
    """Creates observations from a nature run and assimilates them.

    Args:
        assim_time (dt.datetime): timestamp of prior wrfout files
        prior_init_time (dt.datetime): timestamp to find the directory
            where the prior wrfout files are
        prior_path_exp (bool or str): put a `str` to take the prior from
            a different experiment; if False: use `archivedir` (defined
            in config) to get prior state

    Returns:
        SLURM job id of the last submitted job (for chaining dependencies)
    """
    if not prior_path_exp:
        prior_path_exp = cluster.archivedir()
    elif not isinstance(prior_path_exp, str):
        raise TypeError('prior_path_exp either str or False, is '
                        + str(type(prior_path_exp)))

    # prepare state of nature run, from which observation is sampled
    #s = my_Slurm("prepNature", cfg_update=dict(time="2"))
    #id = s.run(cluster.python+' '+cluster.scriptsdir+'/prepare_nature.py '
    #           +time.strftime('%Y-%m-%d_%H:%M'), depends_on=[depends_on])

    # prepare prior model state
    job = my_Slurm("preAssim", cfg_update=dict(time="2"))
    jobid = job.run(cluster.python + ' ' + cluster.scriptsdir
                    + '/pre_assim.py '
                    + assim_time.strftime('%Y-%m-%d_%H:%M ')
                    + prior_init_time.strftime('%Y-%m-%d_%H:%M ')
                    + prior_path_exp,
                    depends_on=[depends_on])

    # prepare nature run, generate observations
    job = my_Slurm("Assim", cfg_update={"nodes": "1", "ntasks": "96",
                                        "time": "30", "mem": "300G",
                                        "ntasks-per-node": "96",
                                        "ntasks-per-core": "2"})
    jobid = job.run(cluster.python + ' ' + cluster.scriptsdir
                    + '/assim_synth_obs.py '
                    + assim_time.strftime('%Y-%m-%d_%H:%M'),
                    depends_on=[jobid])

    # # actuall assimilation step
    # s = my_Slurm("Assim", cfg_update=dict(nodes="1", ntasks="48", time="50", mem="200G"))
    # cmd = 'cd '+cluster.dartrundir+'; mpirun -np 48 ./filter; rm obs_seq_all.out'
    # id = s.run(cmd, depends_on=[id])
    # s = my_Slurm("archiveAssim", cfg_update=dict(time="10"))
    # id = s.run(cluster.python+' '+cluster.scriptsdir+'/archive_assim.py '
    #            + assim_time.strftime('%Y-%m-%d_%H:%M'), depends_on=[id])

    # update initial conditions from the filter output
    job = my_Slurm("updateIC", cfg_update=dict(time="8"))
    jobid = job.run(cluster.python + ' ' + cluster.scriptsdir
                    + '/update_wrfinput_from_filteroutput.py '
                    + assim_time.strftime('%Y-%m-%d_%H:%M ')
                    + prior_init_time.strftime('%Y-%m-%d_%H:%M ')
                    + prior_path_exp,
                    depends_on=[jobid])
    return jobid
submitting jobs into SLURM queue """ import os, sys, shutil, glob import datetime as dt from slurmpy import Slurm from config.cfg import exp, cluster from scripts.utils import script_to_str, symlink # necessary to find modules in folder, since SLURM runs the script elsewhere sys.path.append(os.getcwd()) # allow scripts to access the configuration symlink(cluster.scriptsdir + '/../config', cluster.scriptsdir + '/config') log_dir = cluster.archivedir() + '/logs/' slurm_scripts_dir = cluster.archivedir() + '/slurm-scripts/' print('logging to', log_dir) print('scripts, which are submitted to SLURM:', slurm_scripts_dir) def my_Slurm(*args, cfg_update=dict(), **kwargs): """Shortcut to slurmpy's class; keep certain default kwargs and only update some with kwarg `cfg_update` see https://github.com/brentp/slurmpy """ return Slurm(*args, slurm_kwargs=dict(cluster.slurm_cfg, **cfg_update), log_dir=log_dir, scripts_dir=slurm_scripts_dir, **kwargs)
1) prepare nature run for DART optional: 2) calculate obs-error from parametrization 3) create obs_seq.in with obs-errors from 2) 4) generate actual observations (obs_seq.out) with obs_seq.in from 3) - calculate obs-error from parametrization 1) create obs_seq.in with obs error=0 2) calculate y_nat = H(x_nature) and y_ens = H(x_ensemble) 3) calculate obs error as function of y_nat, y_ensmean Assumptions: - x_ensemble is already linked for DART to advance_temp<iens>/wrfout_d01 """ time = dt.datetime.strptime(sys.argv[1], '%Y-%m-%d_%H:%M') archive_time = cluster.archivedir()+time.strftime('/%Y-%m-%d_%H:%M/') os.chdir(cluster.dartrundir) os.system('rm -f obs_seq.in obs_seq.out obs_seq.final') # remove any existing observation files n_stages = len(exp.observations) for istage, obscfg in enumerate(exp.observations): print('running observation stage', istage, obscfg) archive_stage = archive_time + '/assim_stage'+str(istage)+'/' n_obs = obscfg['n_obs'] n_obs_3d = n_obs * len(obscfg['heights']) sat_channel = obscfg.get('sat_channel', False) error_generate = obscfg['error_generate'] error_assimilate = obscfg['error_assimilate']
import os, sys, warnings, glob import datetime as dt from config.cfg import exp, cluster from utils import symlink, copy_scp_srvx8, copy, mkdir, mkdir_srvx8, clean_wrfdir # if cluster.name != 'srvx8': # copy = copy_scp_srvx8 # mkdir = mkdir_srvx8 time = dt.datetime.strptime(sys.argv[1], '%Y-%m-%d_%H:%M') try: print('archive obs space diagnostics') savedir = cluster.archivedir() + '/obs_seq_final/' mkdir(savedir) copy(cluster.dartrundir + '/obs_seq.final', savedir + time.strftime('/%Y-%m-%d_%H:%M_obs_seq.final')) except Exception as e: warnings.warn(str(e)) try: print('archive regression diagnostics') savedir = cluster.archivedir() + '/reg_factor/' mkdir(savedir) copy(cluster.dartrundir + '/reg_diagnostics', savedir + time.strftime('/%Y-%m-%d_%H:%M_reg_diagnostics')) except Exception as e: warnings.warn(str(e)) print('archive model state') try:
"""Apply observation operator to some ensemble state i.e. wrfout files in an archive directory output: saves obs_seq.final files these contain the Hx values usually applied to 1 min forecasts to assess the posterior analysis quality (analysis+1minute = 'posterior') """ if __name__ == '__main__': prev_forecast_init = dt.datetime.strptime(sys.argv[1], '%Y-%m-%d_%H:%M') time = dt.datetime.strptime(sys.argv[2], '%Y-%m-%d_%H:%M') exppath_firstguess = cluster.archivedir() print(prev_forecast_init, time) # link ensemble states to run_DART directory # we want the observation operator applied to these states! pre_assim.run(time, prev_forecast_init, exppath_firstguess) savedir = cluster.archivedir()+'/obs_seq_final_1min/' n_stages = len(exp.observations) for istage, obscfg in enumerate(exp.observations): n_obs = obscfg['n_obs'] sat_channel = obscfg.get('sat_channel', False) obscfg['folder_obs_coords'] = False